1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22
23 #include "cpu.h"
24 #include "exec/exec-all.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/hvf.h"
27 #include "sysemu/cpus.h"
28 #include "kvm_i386.h"
29 #include "sev_i386.h"
30
31 #include "qemu/error-report.h"
32 #include "qemu/option.h"
33 #include "qemu/config-file.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-visit-misc.h"
36 #include "qapi/qapi-visit-run-state.h"
37 #include "qapi/qmp/qdict.h"
38 #include "qapi/qmp/qerror.h"
39 #include "qapi/visitor.h"
40 #include "qom/qom-qobject.h"
41 #include "sysemu/arch_init.h"
42
43 #include "standard-headers/asm-x86/kvm_para.h"
44
45 #include "sysemu/sysemu.h"
46 #include "hw/qdev-properties.h"
47 #include "hw/i386/topology.h"
48 #ifndef CONFIG_USER_ONLY
49 #include "exec/address-spaces.h"
50 #include "hw/hw.h"
51 #include "hw/xen/xen.h"
52 #include "hw/i386/apic_internal.h"
53 #endif
54
55 #include "disas/capstone.h"
56
57 /* Helpers for building CPUID[2] descriptors: */
58
59 struct CPUID2CacheDescriptorInfo {
60 enum CacheType type;
61 int level;
62 int size;
63 int line_size;
64 int associativity;
65 };
66
67 #define KiB 1024
68 #define MiB (1024 * 1024)
69
70 /*
71 * Known CPUID 2 cache descriptors.
72 * From Intel SDM Volume 2A, CPUID instruction
73 */
74 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
75 [0x06] = { .level = 1, .type = ICACHE, .size = 8 * KiB,
76 .associativity = 4, .line_size = 32, },
77 [0x08] = { .level = 1, .type = ICACHE, .size = 16 * KiB,
78 .associativity = 4, .line_size = 32, },
79 [0x09] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
80 .associativity = 4, .line_size = 64, },
81 [0x0A] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
82 .associativity = 2, .line_size = 32, },
83 [0x0C] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
84 .associativity = 4, .line_size = 32, },
85 [0x0D] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
86 .associativity = 4, .line_size = 64, },
87 [0x0E] = { .level = 1, .type = DCACHE, .size = 24 * KiB,
88 .associativity = 6, .line_size = 64, },
89 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
90 .associativity = 2, .line_size = 64, },
91 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
92 .associativity = 8, .line_size = 64, },
93 /* lines per sector is not supported by cpuid2_cache_descriptor(),
94 * so descriptors 0x22, 0x23 are not included
95 */
96 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
97 .associativity = 16, .line_size = 64, },
98 /* lines per sector is not supported by cpuid2_cache_descriptor(),
99 * so descriptors 0x25, 0x20 are not included
100 */
101 [0x2C] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
102 .associativity = 8, .line_size = 64, },
103 [0x30] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
104 .associativity = 8, .line_size = 64, },
105 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
106 .associativity = 4, .line_size = 32, },
107 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
108 .associativity = 4, .line_size = 32, },
109 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
112 .associativity = 4, .line_size = 32, },
113 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
114 .associativity = 4, .line_size = 32, },
115 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
116 .associativity = 4, .line_size = 64, },
117 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
118 .associativity = 8, .line_size = 64, },
119 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
120 .associativity = 12, .line_size = 64, },
121 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
122 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
123 .associativity = 12, .line_size = 64, },
124 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
125 .associativity = 16, .line_size = 64, },
126 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
131 .associativity = 24, .line_size = 64, },
132 [0x60] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
133 .associativity = 8, .line_size = 64, },
134 [0x66] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
135 .associativity = 4, .line_size = 64, },
136 [0x67] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
137 .associativity = 4, .line_size = 64, },
138 [0x68] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
141 .associativity = 4, .line_size = 64, },
142 /* lines per sector is not supported by cpuid2_cache_descriptor(),
143 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
144 */
145 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
146 .associativity = 8, .line_size = 64, },
147 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
148 .associativity = 2, .line_size = 64, },
149 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
150 .associativity = 8, .line_size = 64, },
151 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
152 .associativity = 8, .line_size = 32, },
153 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 32, },
155 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
156 .associativity = 8, .line_size = 32, },
157 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
158 .associativity = 8, .line_size = 32, },
159 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
160 .associativity = 4, .line_size = 64, },
161 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
162 .associativity = 8, .line_size = 64, },
163 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 4, .line_size = 64, },
167 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 8, .line_size = 64, },
171 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 8, .line_size = 64, },
173 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
176 .associativity = 12, .line_size = 64, },
177 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
178 .associativity = 12, .line_size = 64, },
179 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
182 .associativity = 16, .line_size = 64, },
183 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
184 .associativity = 16, .line_size = 64, },
185 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
188 .associativity = 24, .line_size = 64, },
189 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
190 .associativity = 24, .line_size = 64, },
191 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
192 .associativity = 24, .line_size = 64, },
193 };
194
195 /*
196 * "CPUID leaf 2 does not report cache descriptor information,
197 * use CPUID leaf 4 to query cache parameters"
198 */
199 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
200
201 /*
202 * Return a CPUID 2 cache descriptor for a given cache.
203 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
204 */
205 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
206 {
207 int i;
208
209 assert(cache->size > 0);
210 assert(cache->level > 0);
211 assert(cache->line_size > 0);
212 assert(cache->associativity > 0);
213 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
214 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
215 if (d->level == cache->level && d->type == cache->type &&
216 d->size == cache->size && d->line_size == cache->line_size &&
217 d->associativity == cache->associativity) {
218 return i;
219 }
220 }
221
222 return CACHE_DESCRIPTOR_UNAVAILABLE;
223 }
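
/*
 * For example, a 2 MiB, 8-way, 64-byte-line unified L2 cache (the
 * legacy_l2_cache_cpuid2 geometry defined below) matches entry [0x7D]
 * above, so this function returns 0x7D.  A geometry with no table entry,
 * such as the 4 MiB, 16-way legacy_l2_cache, yields
 * CACHE_DESCRIPTOR_UNAVAILABLE (0xFF).
 */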
224
225 /* CPUID Leaf 4 constants: */
226
227 /* EAX: */
228 #define CACHE_TYPE_D 1
229 #define CACHE_TYPE_I 2
230 #define CACHE_TYPE_UNIFIED 3
231
232 #define CACHE_LEVEL(l) (l << 5)
233
234 #define CACHE_SELF_INIT_LEVEL (1 << 8)
235
236 /* EDX: */
237 #define CACHE_NO_INVD_SHARING (1 << 0)
238 #define CACHE_INCLUSIVE (1 << 1)
239 #define CACHE_COMPLEX_IDX (1 << 2)
240
241 /* Encode CacheType for CPUID[4].EAX */
242 #define CACHE_TYPE(t) (((t) == DCACHE) ? CACHE_TYPE_D : \
243 ((t) == ICACHE) ? CACHE_TYPE_I : \
244 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
245 0 /* Invalid value */)
246
247
248 /* Encode cache info for CPUID[4] */
249 static void encode_cache_cpuid4(CPUCacheInfo *cache,
250 int num_apic_ids, int num_cores,
251 uint32_t *eax, uint32_t *ebx,
252 uint32_t *ecx, uint32_t *edx)
253 {
254 assert(cache->size == cache->line_size * cache->associativity *
255 cache->partitions * cache->sets);
256
257 assert(num_apic_ids > 0);
258 *eax = CACHE_TYPE(cache->type) |
259 CACHE_LEVEL(cache->level) |
260 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
261 ((num_cores - 1) << 26) |
262 ((num_apic_ids - 1) << 14);
263
264 assert(cache->line_size > 0);
265 assert(cache->partitions > 0);
266 assert(cache->associativity > 0);
267 /* We don't implement fully-associative caches */
268 assert(cache->associativity < cache->sets);
269 *ebx = (cache->line_size - 1) |
270 ((cache->partitions - 1) << 12) |
271 ((cache->associativity - 1) << 22);
272
273 assert(cache->sets > 0);
274 *ecx = cache->sets - 1;
275
276 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
277 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
278 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
279 }
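
/*
 * For example, encoding legacy_l1d_cache (32 KiB, 8-way, 64-byte lines,
 * 64 sets, 1 partition) with num_apic_ids = 1 and num_cores = 1 yields:
 *   EAX = CACHE_TYPE_D | CACHE_LEVEL(1) | CACHE_SELF_INIT_LEVEL = 0x121
 *   EBX = (64 - 1) | ((1 - 1) << 12) | ((8 - 1) << 22)          = 0x01c0003f
 *   ECX = 64 - 1                                                = 0x3f
 *   EDX = CACHE_NO_INVD_SHARING                                 = 0x1
 */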
280
281 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
282 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
283 {
284 assert(cache->size % 1024 == 0);
285 assert(cache->lines_per_tag > 0);
286 assert(cache->associativity > 0);
287 assert(cache->line_size > 0);
288 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
289 (cache->lines_per_tag << 8) | (cache->line_size);
290 }
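
/*
 * For example, legacy_l1d_cache_amd (64 KiB, 2-way, 1 line per tag,
 * 64-byte lines) encodes as (64 << 24) | (2 << 16) | (1 << 8) | 64
 * = 0x40020140.
 */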
291
292 #define ASSOC_FULL 0xFF
293
294 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
295 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
296 a == 2 ? 0x2 : \
297 a == 4 ? 0x4 : \
298 a == 8 ? 0x6 : \
299 a == 16 ? 0x8 : \
300 a == 32 ? 0xA : \
301 a == 48 ? 0xB : \
302 a == 64 ? 0xC : \
303 a == 96 ? 0xD : \
304 a == 128 ? 0xE : \
305 a == ASSOC_FULL ? 0xF : \
306 0 /* invalid value */)
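
/*
 * For example, AMD_ENC_ASSOC(8) is 0x6, AMD_ENC_ASSOC(16) is 0x8 and
 * AMD_ENC_ASSOC(ASSOC_FULL) is 0xF; associativities not listed above
 * (e.g. 6 or 12) encode as 0, the invalid/reserved value.
 */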
307
308 /*
309 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
310 * @l3 can be NULL.
311 */
312 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
313 CPUCacheInfo *l3,
314 uint32_t *ecx, uint32_t *edx)
315 {
316 assert(l2->size % 1024 == 0);
317 assert(l2->associativity > 0);
318 assert(l2->lines_per_tag > 0);
319 assert(l2->line_size > 0);
320 *ecx = ((l2->size / 1024) << 16) |
321 (AMD_ENC_ASSOC(l2->associativity) << 12) |
322 (l2->lines_per_tag << 8) | (l2->line_size);
323
324 if (l3) {
325 assert(l3->size % (512 * 1024) == 0);
326 assert(l3->associativity > 0);
327 assert(l3->lines_per_tag > 0);
328 assert(l3->line_size > 0);
329 *edx = ((l3->size / (512 * 1024)) << 18) |
330 (AMD_ENC_ASSOC(l3->associativity) << 12) |
331 (l3->lines_per_tag << 8) | (l3->line_size);
332 } else {
333 *edx = 0;
334 }
335 }
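
/*
 * For example, with legacy_l2_cache_amd (512 KiB, 16-way, 1 line per tag,
 * 64-byte lines) and legacy_l3_cache (16 MiB, 16-way, 1 line per tag,
 * 64-byte lines) this produces:
 *   ECX = (512 << 16) | (0x8 << 12) | (1 << 8) | 64 = 0x02008140
 *   EDX = (32 << 18)  | (0x8 << 12) | (1 << 8) | 64 = 0x00808140
 * where 32 is the L3 size in 512 KiB units and 0x8 is AMD_ENC_ASSOC(16).
 */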
336
337 /*
338 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
339 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
340 * These constants define the CPU topology. Right now, the TOPOEXT
341 * feature is enabled only on EPYC, so the constants are based on
342 * EPYC-supported configurations. We may need to handle other cases if
343 * these values change in the future.
344 */
345 /* Maximum core complexes in a node */
346 #define MAX_CCX 2
347 /* Maximum cores in a core complex */
348 #define MAX_CORES_IN_CCX 4
349 /* Maximum cores in a node */
350 #define MAX_CORES_IN_NODE 8
351 /* Maximum nodes in a socket */
352 #define MAX_NODES_PER_SOCKET 4
353
354 /*
355 * Figure out the number of nodes required to build this config.
356 * The maximum number of cores in a node is MAX_CORES_IN_NODE (8).
357 */
358 static int nodes_in_socket(int nr_cores)
359 {
360 int nodes;
361
362 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
363
364 /* Hardware does not support a config with 3 nodes; return 4 in that case */
365 return (nodes == 3) ? 4 : nodes;
366 }
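
/*
 * For example, nr_cores = 8 gives 1 node, nr_cores = 12 gives 2 nodes,
 * and nr_cores = 24 would give 3, which is rounded up to 4 because the
 * hardware has no 3-node configuration.
 */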
367
368 /*
369 * Decide the number of cores in a core complex for the given nr_cores, using
370 * the constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
371 * MAX_NODES_PER_SOCKET, and maintaining symmetry as much as possible.
372 * The L3 cache is shared across all cores in a core complex, so this also
373 * tells us how many cores are sharing the L3 cache.
374 */
375 static int cores_in_core_complex(int nr_cores)
376 {
377 int nodes;
378
379 /* Check if we can fit all the cores in one core complex */
380 if (nr_cores <= MAX_CORES_IN_CCX) {
381 return nr_cores;
382 }
383 /* Get the number of nodes required to build this config */
384 nodes = nodes_in_socket(nr_cores);
385
386 /*
387 * Divide the cores across all the core complexes.
388 * Return the rounded-up value.
389 */
390 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
391 }
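
/*
 * For example, nr_cores = 4 fits in a single core complex, so the result
 * is 4.  With nr_cores = 12 we get 2 nodes, so the cores are spread over
 * 2 * MAX_CCX = 4 core complexes and the result is DIV_ROUND_UP(12, 4) = 3.
 */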
392
393 /* Encode cache info for CPUID[8000001D] */
394 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
395 uint32_t *eax, uint32_t *ebx,
396 uint32_t *ecx, uint32_t *edx)
397 {
398 uint32_t l3_cores;
399 assert(cache->size == cache->line_size * cache->associativity *
400 cache->partitions * cache->sets);
401
402 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
403 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
404
405 /* L3 is shared among multiple cores */
406 if (cache->level == 3) {
407 l3_cores = cores_in_core_complex(cs->nr_cores);
408 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
409 } else {
410 *eax |= ((cs->nr_threads - 1) << 14);
411 }
412
413 assert(cache->line_size > 0);
414 assert(cache->partitions > 0);
415 assert(cache->associativity > 0);
416 /* We don't implement fully-associative caches */
417 assert(cache->associativity < cache->sets);
418 *ebx = (cache->line_size - 1) |
419 ((cache->partitions - 1) << 12) |
420 ((cache->associativity - 1) << 22);
421
422 assert(cache->sets > 0);
423 *ecx = cache->sets - 1;
424
425 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
426 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
427 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
428 }
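
/*
 * For example, with cs->nr_cores = 8 and cs->nr_threads = 2, the EPYC L3
 * cache is shared by cores_in_core_complex(8) = 4 cores, so EAX[25:14]
 * is set to 4 * 2 - 1 = 7 (8 logical processors sharing the cache); for
 * the L1/L2 caches the same field is nr_threads - 1 = 1.
 */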
429
430 /*
431 * Definitions of the hardcoded cache entries we expose:
432 * These are legacy cache values. If there is a need to change any
433 * of these values, please use builtin_x86_defs.
434 */
435
436 /* L1 data cache: */
437 static CPUCacheInfo legacy_l1d_cache = {
438 .type = DCACHE,
439 .level = 1,
440 .size = 32 * KiB,
441 .self_init = 1,
442 .line_size = 64,
443 .associativity = 8,
444 .sets = 64,
445 .partitions = 1,
446 .no_invd_sharing = true,
447 };
448
449 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
450 static CPUCacheInfo legacy_l1d_cache_amd = {
451 .type = DCACHE,
452 .level = 1,
453 .size = 64 * KiB,
454 .self_init = 1,
455 .line_size = 64,
456 .associativity = 2,
457 .sets = 512,
458 .partitions = 1,
459 .lines_per_tag = 1,
460 .no_invd_sharing = true,
461 };
462
463 /* L1 instruction cache: */
464 static CPUCacheInfo legacy_l1i_cache = {
465 .type = ICACHE,
466 .level = 1,
467 .size = 32 * KiB,
468 .self_init = 1,
469 .line_size = 64,
470 .associativity = 8,
471 .sets = 64,
472 .partitions = 1,
473 .no_invd_sharing = true,
474 };
475
476 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
477 static CPUCacheInfo legacy_l1i_cache_amd = {
478 .type = ICACHE,
479 .level = 1,
480 .size = 64 * KiB,
481 .self_init = 1,
482 .line_size = 64,
483 .associativity = 2,
484 .sets = 512,
485 .partitions = 1,
486 .lines_per_tag = 1,
487 .no_invd_sharing = true,
488 };
489
490 /* Level 2 unified cache: */
491 static CPUCacheInfo legacy_l2_cache = {
492 .type = UNIFIED_CACHE,
493 .level = 2,
494 .size = 4 * MiB,
495 .self_init = 1,
496 .line_size = 64,
497 .associativity = 16,
498 .sets = 4096,
499 .partitions = 1,
500 .no_invd_sharing = true,
501 };
502
503 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
504 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
505 .type = UNIFIED_CACHE,
506 .level = 2,
507 .size = 2 * MiB,
508 .line_size = 64,
509 .associativity = 8,
510 };
511
512
513 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
514 static CPUCacheInfo legacy_l2_cache_amd = {
515 .type = UNIFIED_CACHE,
516 .level = 2,
517 .size = 512 * KiB,
518 .line_size = 64,
519 .lines_per_tag = 1,
520 .associativity = 16,
521 .sets = 512,
522 .partitions = 1,
523 };
524
525 /* Level 3 unified cache: */
526 static CPUCacheInfo legacy_l3_cache = {
527 .type = UNIFIED_CACHE,
528 .level = 3,
529 .size = 16 * MiB,
530 .line_size = 64,
531 .associativity = 16,
532 .sets = 16384,
533 .partitions = 1,
534 .lines_per_tag = 1,
535 .self_init = true,
536 .inclusive = true,
537 .complex_indexing = true,
538 };
539
540 /* TLB definitions: */
541
542 #define L1_DTLB_2M_ASSOC 1
543 #define L1_DTLB_2M_ENTRIES 255
544 #define L1_DTLB_4K_ASSOC 1
545 #define L1_DTLB_4K_ENTRIES 255
546
547 #define L1_ITLB_2M_ASSOC 1
548 #define L1_ITLB_2M_ENTRIES 255
549 #define L1_ITLB_4K_ASSOC 1
550 #define L1_ITLB_4K_ENTRIES 255
551
552 #define L2_DTLB_2M_ASSOC 0 /* disabled */
553 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
554 #define L2_DTLB_4K_ASSOC 4
555 #define L2_DTLB_4K_ENTRIES 512
556
557 #define L2_ITLB_2M_ASSOC 0 /* disabled */
558 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
559 #define L2_ITLB_4K_ASSOC 4
560 #define L2_ITLB_4K_ENTRIES 512
561
562 /* CPUID Leaf 0x14 constants: */
563 #define INTEL_PT_MAX_SUBLEAF 0x1
564 /*
565 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
566 * MSR can be accessed;
567 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
568 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
569 * of Intel PT MSRs across warm reset;
570 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
571 */
572 #define INTEL_PT_MINIMAL_EBX 0xf
573 /*
574 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
575 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
576 * accessed;
577 * bit[01]: ToPA tables can hold any number of output entries, up to the
578 * maximum allowed by the MaskOrTableOffset field of
579 * IA32_RTIT_OUTPUT_MASK_PTRS;
580 * bit[02]: Support Single-Range Output scheme;
581 */
582 #define INTEL_PT_MINIMAL_ECX 0x7
583 /* generated packets which contain IP payloads have LIP values */
584 #define INTEL_PT_IP_LIP (1 << 31)
585 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
586 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
587 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
588 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
589 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
590
591 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
592 uint32_t vendor2, uint32_t vendor3)
593 {
594 int i;
595 for (i = 0; i < 4; i++) {
596 dst[i] = vendor1 >> (8 * i);
597 dst[i + 4] = vendor2 >> (8 * i);
598 dst[i + 8] = vendor3 >> (8 * i);
599 }
600 dst[CPUID_VENDOR_SZ] = '\0';
601 }
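
/*
 * For example, the CPUID.0 register values returned by Intel hardware,
 * vendor1 = EBX = 0x756e6547, vendor2 = EDX = 0x49656e69 and
 * vendor3 = ECX = 0x6c65746e, produce the string "GenuineIntel".
 */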
602
603 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
604 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
605 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
606 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
607 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
608 CPUID_PSE36 | CPUID_FXSR)
609 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
610 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
611 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
612 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
613 CPUID_PAE | CPUID_SEP | CPUID_APIC)
614
615 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
616 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
617 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
618 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
619 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
620 /* partly implemented:
621 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
622 /* missing:
623 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
624 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
625 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
626 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
627 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
628 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
629 /* missing:
630 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
631 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
632 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
633 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
634 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
635
636 #ifdef TARGET_X86_64
637 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
638 #else
639 #define TCG_EXT2_X86_64_FEATURES 0
640 #endif
641
642 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
643 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
644 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
645 TCG_EXT2_X86_64_FEATURES)
646 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
647 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
648 #define TCG_EXT4_FEATURES 0
649 #define TCG_SVM_FEATURES 0
650 #define TCG_KVM_FEATURES 0
651 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
652 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
653 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
654 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
655 CPUID_7_0_EBX_ERMS)
656 /* missing:
657 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
658 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
659 CPUID_7_0_EBX_RDSEED */
660 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
661 CPUID_7_0_ECX_LA57)
662 #define TCG_7_0_EDX_FEATURES 0
663 #define TCG_APM_FEATURES 0
664 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
665 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
666 /* missing:
667 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
668
669 typedef struct FeatureWordInfo {
670 /* feature flag names are taken from "Intel Processor Identification and
671 * the CPUID Instruction" and AMD's "CPUID Specification".
672 * In cases of disagreement between feature naming conventions,
673 * aliases may be added.
674 */
675 const char *feat_names[32];
676 uint32_t cpuid_eax; /* Input EAX for CPUID */
677 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
678 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
679 int cpuid_reg; /* output register (R_* constant) */
680 uint32_t tcg_features; /* Feature flags supported by TCG */
681 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
682 uint32_t migratable_flags; /* Feature flags known to be migratable */
683 /* Features that shouldn't be auto-enabled by "-cpu host" */
684 uint32_t no_autoenable_flags;
685 } FeatureWordInfo;
686
687 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
688 [FEAT_1_EDX] = {
689 .feat_names = {
690 "fpu", "vme", "de", "pse",
691 "tsc", "msr", "pae", "mce",
692 "cx8", "apic", NULL, "sep",
693 "mtrr", "pge", "mca", "cmov",
694 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
695 NULL, "ds" /* Intel dts */, "acpi", "mmx",
696 "fxsr", "sse", "sse2", "ss",
697 "ht" /* Intel htt */, "tm", "ia64", "pbe",
698 },
699 .cpuid_eax = 1, .cpuid_reg = R_EDX,
700 .tcg_features = TCG_FEATURES,
701 },
702 [FEAT_1_ECX] = {
703 .feat_names = {
704 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
705 "ds-cpl", "vmx", "smx", "est",
706 "tm2", "ssse3", "cid", NULL,
707 "fma", "cx16", "xtpr", "pdcm",
708 NULL, "pcid", "dca", "sse4.1",
709 "sse4.2", "x2apic", "movbe", "popcnt",
710 "tsc-deadline", "aes", "xsave", "osxsave",
711 "avx", "f16c", "rdrand", "hypervisor",
712 },
713 .cpuid_eax = 1, .cpuid_reg = R_ECX,
714 .tcg_features = TCG_EXT_FEATURES,
715 },
716 /* Feature names that are already defined in feature_name[] but
717 * are set in CPUID[8000_0001].EDX on AMD CPUs don't have their
718 * names repeated in feat_names below. They are copied automatically
719 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
720 */
721 [FEAT_8000_0001_EDX] = {
722 .feat_names = {
723 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
724 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
725 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
726 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
727 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
728 "nx", NULL, "mmxext", NULL /* mmx */,
729 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
730 NULL, "lm", "3dnowext", "3dnow",
731 },
732 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
733 .tcg_features = TCG_EXT2_FEATURES,
734 },
735 [FEAT_8000_0001_ECX] = {
736 .feat_names = {
737 "lahf-lm", "cmp-legacy", "svm", "extapic",
738 "cr8legacy", "abm", "sse4a", "misalignsse",
739 "3dnowprefetch", "osvw", "ibs", "xop",
740 "skinit", "wdt", NULL, "lwp",
741 "fma4", "tce", NULL, "nodeid-msr",
742 NULL, "tbm", "topoext", "perfctr-core",
743 "perfctr-nb", NULL, NULL, NULL,
744 NULL, NULL, NULL, NULL,
745 },
746 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
747 .tcg_features = TCG_EXT3_FEATURES,
748 },
749 [FEAT_C000_0001_EDX] = {
750 .feat_names = {
751 NULL, NULL, "xstore", "xstore-en",
752 NULL, NULL, "xcrypt", "xcrypt-en",
753 "ace2", "ace2-en", "phe", "phe-en",
754 "pmm", "pmm-en", NULL, NULL,
755 NULL, NULL, NULL, NULL,
756 NULL, NULL, NULL, NULL,
757 NULL, NULL, NULL, NULL,
758 NULL, NULL, NULL, NULL,
759 },
760 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
761 .tcg_features = TCG_EXT4_FEATURES,
762 },
763 [FEAT_KVM] = {
764 .feat_names = {
765 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
766 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
767 NULL, "kvm-pv-tlb-flush", NULL, NULL,
768 NULL, NULL, NULL, NULL,
769 NULL, NULL, NULL, NULL,
770 NULL, NULL, NULL, NULL,
771 "kvmclock-stable-bit", NULL, NULL, NULL,
772 NULL, NULL, NULL, NULL,
773 },
774 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
775 .tcg_features = TCG_KVM_FEATURES,
776 },
777 [FEAT_KVM_HINTS] = {
778 .feat_names = {
779 "kvm-hint-dedicated", NULL, NULL, NULL,
780 NULL, NULL, NULL, NULL,
781 NULL, NULL, NULL, NULL,
782 NULL, NULL, NULL, NULL,
783 NULL, NULL, NULL, NULL,
784 NULL, NULL, NULL, NULL,
785 NULL, NULL, NULL, NULL,
786 NULL, NULL, NULL, NULL,
787 },
788 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
789 .tcg_features = TCG_KVM_FEATURES,
790 /*
791 * KVM hints aren't auto-enabled by -cpu host; they need to be
792 * explicitly enabled on the command line.
793 */
794 .no_autoenable_flags = ~0U,
795 },
796 [FEAT_HYPERV_EAX] = {
797 .feat_names = {
798 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
799 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
800 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
801 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
802 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
803 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
804 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
805 NULL, NULL,
806 NULL, NULL, NULL, NULL,
807 NULL, NULL, NULL, NULL,
808 NULL, NULL, NULL, NULL,
809 NULL, NULL, NULL, NULL,
810 },
811 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
812 },
813 [FEAT_HYPERV_EBX] = {
814 .feat_names = {
815 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
816 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
817 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
818 NULL /* hv_create_port */, NULL /* hv_connect_port */,
819 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
820 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
821 NULL, NULL,
822 NULL, NULL, NULL, NULL,
823 NULL, NULL, NULL, NULL,
824 NULL, NULL, NULL, NULL,
825 NULL, NULL, NULL, NULL,
826 },
827 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
828 },
829 [FEAT_HYPERV_EDX] = {
830 .feat_names = {
831 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
832 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
833 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
834 NULL, NULL,
835 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
836 NULL, NULL, NULL, NULL,
837 NULL, NULL, NULL, NULL,
838 NULL, NULL, NULL, NULL,
839 NULL, NULL, NULL, NULL,
840 NULL, NULL, NULL, NULL,
841 },
842 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
843 },
844 [FEAT_SVM] = {
845 .feat_names = {
846 "npt", "lbrv", "svm-lock", "nrip-save",
847 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
848 NULL, NULL, "pause-filter", NULL,
849 "pfthreshold", NULL, NULL, NULL,
850 NULL, NULL, NULL, NULL,
851 NULL, NULL, NULL, NULL,
852 NULL, NULL, NULL, NULL,
853 NULL, NULL, NULL, NULL,
854 },
855 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
856 .tcg_features = TCG_SVM_FEATURES,
857 },
858 [FEAT_7_0_EBX] = {
859 .feat_names = {
860 "fsgsbase", "tsc-adjust", NULL, "bmi1",
861 "hle", "avx2", NULL, "smep",
862 "bmi2", "erms", "invpcid", "rtm",
863 NULL, NULL, "mpx", NULL,
864 "avx512f", "avx512dq", "rdseed", "adx",
865 "smap", "avx512ifma", "pcommit", "clflushopt",
866 "clwb", "intel-pt", "avx512pf", "avx512er",
867 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
868 },
869 .cpuid_eax = 7,
870 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
871 .cpuid_reg = R_EBX,
872 .tcg_features = TCG_7_0_EBX_FEATURES,
873 },
874 [FEAT_7_0_ECX] = {
875 .feat_names = {
876 NULL, "avx512vbmi", "umip", "pku",
877 "ospke", NULL, "avx512vbmi2", NULL,
878 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
879 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
880 "la57", NULL, NULL, NULL,
881 NULL, NULL, "rdpid", NULL,
882 NULL, "cldemote", NULL, NULL,
883 NULL, NULL, NULL, NULL,
884 },
885 .cpuid_eax = 7,
886 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
887 .cpuid_reg = R_ECX,
888 .tcg_features = TCG_7_0_ECX_FEATURES,
889 },
890 [FEAT_7_0_EDX] = {
891 .feat_names = {
892 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
893 NULL, NULL, NULL, NULL,
894 NULL, NULL, NULL, NULL,
895 NULL, NULL, NULL, NULL,
896 NULL, NULL, NULL, NULL,
897 NULL, NULL, NULL, NULL,
898 NULL, NULL, "spec-ctrl", NULL,
899 NULL, NULL, NULL, "ssbd",
900 },
901 .cpuid_eax = 7,
902 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
903 .cpuid_reg = R_EDX,
904 .tcg_features = TCG_7_0_EDX_FEATURES,
905 },
906 [FEAT_8000_0007_EDX] = {
907 .feat_names = {
908 NULL, NULL, NULL, NULL,
909 NULL, NULL, NULL, NULL,
910 "invtsc", NULL, NULL, NULL,
911 NULL, NULL, NULL, NULL,
912 NULL, NULL, NULL, NULL,
913 NULL, NULL, NULL, NULL,
914 NULL, NULL, NULL, NULL,
915 NULL, NULL, NULL, NULL,
916 },
917 .cpuid_eax = 0x80000007,
918 .cpuid_reg = R_EDX,
919 .tcg_features = TCG_APM_FEATURES,
920 .unmigratable_flags = CPUID_APM_INVTSC,
921 },
922 [FEAT_8000_0008_EBX] = {
923 .feat_names = {
924 NULL, NULL, NULL, NULL,
925 NULL, NULL, NULL, NULL,
926 NULL, NULL, NULL, NULL,
927 "ibpb", NULL, NULL, NULL,
928 NULL, NULL, NULL, NULL,
929 NULL, NULL, NULL, NULL,
930 NULL, "virt-ssbd", NULL, NULL,
931 NULL, NULL, NULL, NULL,
932 },
933 .cpuid_eax = 0x80000008,
934 .cpuid_reg = R_EBX,
935 .tcg_features = 0,
936 .unmigratable_flags = 0,
937 },
938 [FEAT_XSAVE] = {
939 .feat_names = {
940 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
941 NULL, NULL, NULL, NULL,
942 NULL, NULL, NULL, NULL,
943 NULL, NULL, NULL, NULL,
944 NULL, NULL, NULL, NULL,
945 NULL, NULL, NULL, NULL,
946 NULL, NULL, NULL, NULL,
947 NULL, NULL, NULL, NULL,
948 },
949 .cpuid_eax = 0xd,
950 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
951 .cpuid_reg = R_EAX,
952 .tcg_features = TCG_XSAVE_FEATURES,
953 },
954 [FEAT_6_EAX] = {
955 .feat_names = {
956 NULL, NULL, "arat", NULL,
957 NULL, NULL, NULL, NULL,
958 NULL, NULL, NULL, NULL,
959 NULL, NULL, NULL, NULL,
960 NULL, NULL, NULL, NULL,
961 NULL, NULL, NULL, NULL,
962 NULL, NULL, NULL, NULL,
963 NULL, NULL, NULL, NULL,
964 },
965 .cpuid_eax = 6, .cpuid_reg = R_EAX,
966 .tcg_features = TCG_6_EAX_FEATURES,
967 },
968 [FEAT_XSAVE_COMP_LO] = {
969 .cpuid_eax = 0xD,
970 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
971 .cpuid_reg = R_EAX,
972 .tcg_features = ~0U,
973 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
974 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
975 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
976 XSTATE_PKRU_MASK,
977 },
978 [FEAT_XSAVE_COMP_HI] = {
979 .cpuid_eax = 0xD,
980 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
981 .cpuid_reg = R_EDX,
982 .tcg_features = ~0U,
983 },
984 };
985
986 typedef struct X86RegisterInfo32 {
987 /* Name of register */
988 const char *name;
989 /* QAPI enum value register */
990 X86CPURegister32 qapi_enum;
991 } X86RegisterInfo32;
992
993 #define REGISTER(reg) \
994 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
995 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
996 REGISTER(EAX),
997 REGISTER(ECX),
998 REGISTER(EDX),
999 REGISTER(EBX),
1000 REGISTER(ESP),
1001 REGISTER(EBP),
1002 REGISTER(ESI),
1003 REGISTER(EDI),
1004 };
1005 #undef REGISTER
1006
1007 typedef struct ExtSaveArea {
1008 uint32_t feature, bits;
1009 uint32_t offset, size;
1010 } ExtSaveArea;
1011
1012 static const ExtSaveArea x86_ext_save_areas[] = {
1013 [XSTATE_FP_BIT] = {
1014 /* x87 FP state component is always enabled if XSAVE is supported */
1015 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1016 /* x87 state is in the legacy region of the XSAVE area */
1017 .offset = 0,
1018 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1019 },
1020 [XSTATE_SSE_BIT] = {
1021 /* SSE state component is always enabled if XSAVE is supported */
1022 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1023 /* SSE state is in the legacy region of the XSAVE area */
1024 .offset = 0,
1025 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1026 },
1027 [XSTATE_YMM_BIT] =
1028 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1029 .offset = offsetof(X86XSaveArea, avx_state),
1030 .size = sizeof(XSaveAVX) },
1031 [XSTATE_BNDREGS_BIT] =
1032 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1033 .offset = offsetof(X86XSaveArea, bndreg_state),
1034 .size = sizeof(XSaveBNDREG) },
1035 [XSTATE_BNDCSR_BIT] =
1036 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1037 .offset = offsetof(X86XSaveArea, bndcsr_state),
1038 .size = sizeof(XSaveBNDCSR) },
1039 [XSTATE_OPMASK_BIT] =
1040 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1041 .offset = offsetof(X86XSaveArea, opmask_state),
1042 .size = sizeof(XSaveOpmask) },
1043 [XSTATE_ZMM_Hi256_BIT] =
1044 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1045 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1046 .size = sizeof(XSaveZMM_Hi256) },
1047 [XSTATE_Hi16_ZMM_BIT] =
1048 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1049 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1050 .size = sizeof(XSaveHi16_ZMM) },
1051 [XSTATE_PKRU_BIT] =
1052 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1053 .offset = offsetof(X86XSaveArea, pkru_state),
1054 .size = sizeof(XSavePKRU) },
1055 };
1056
1057 static uint32_t xsave_area_size(uint64_t mask)
1058 {
1059 int i;
1060 uint64_t ret = 0;
1061
1062 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1063 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1064 if ((mask >> i) & 1) {
1065 ret = MAX(ret, esa->offset + esa->size);
1066 }
1067 }
1068 return ret;
1069 }
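
/*
 * For example, a mask of XSTATE_FP_MASK | XSTATE_SSE_MASK covers only the
 * legacy region plus the XSAVE header (512 + 64 = 576 bytes); adding
 * XSTATE_YMM_MASK extends the result to
 * offsetof(X86XSaveArea, avx_state) + sizeof(XSaveAVX).
 */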
1070
1071 static inline bool accel_uses_host_cpuid(void)
1072 {
1073 return kvm_enabled() || hvf_enabled();
1074 }
1075
1076 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1077 {
1078 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1079 cpu->env.features[FEAT_XSAVE_COMP_LO];
1080 }
1081
1082 const char *get_register_name_32(unsigned int reg)
1083 {
1084 if (reg >= CPU_NB_REGS32) {
1085 return NULL;
1086 }
1087 return x86_reg_info_32[reg].name;
1088 }
1089
1090 /*
1091 * Returns the set of feature flags that are supported and migratable by
1092 * QEMU, for a given FeatureWord.
1093 */
1094 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1095 {
1096 FeatureWordInfo *wi = &feature_word_info[w];
1097 uint32_t r = 0;
1098 int i;
1099
1100 for (i = 0; i < 32; i++) {
1101 uint32_t f = 1U << i;
1102
1103 /* If the feature name is known, it is implicitly considered migratable,
1104 * unless it is explicitly set in unmigratable_flags */
1105 if ((wi->migratable_flags & f) ||
1106 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1107 r |= f;
1108 }
1109 }
1110 return r;
1111 }
1112
1113 void host_cpuid(uint32_t function, uint32_t count,
1114 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1115 {
1116 uint32_t vec[4];
1117
1118 #ifdef __x86_64__
1119 asm volatile("cpuid"
1120 : "=a"(vec[0]), "=b"(vec[1]),
1121 "=c"(vec[2]), "=d"(vec[3])
1122 : "0"(function), "c"(count) : "cc");
1123 #elif defined(__i386__)
1124 asm volatile("pusha \n\t"
1125 "cpuid \n\t"
1126 "mov %%eax, 0(%2) \n\t"
1127 "mov %%ebx, 4(%2) \n\t"
1128 "mov %%ecx, 8(%2) \n\t"
1129 "mov %%edx, 12(%2) \n\t"
1130 "popa"
1131 : : "a"(function), "c"(count), "S"(vec)
1132 : "memory", "cc");
1133 #else
1134 abort();
1135 #endif
1136
1137 if (eax)
1138 *eax = vec[0];
1139 if (ebx)
1140 *ebx = vec[1];
1141 if (ecx)
1142 *ecx = vec[2];
1143 if (edx)
1144 *edx = vec[3];
1145 }
1146
1147 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1148 {
1149 uint32_t eax, ebx, ecx, edx;
1150
1151 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1152 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1153
1154 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1155 if (family) {
1156 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1157 }
1158 if (model) {
1159 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1160 }
1161 if (stepping) {
1162 *stepping = eax & 0x0F;
1163 }
1164 }
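
/*
 * For example, if CPUID.1 returns EAX = 0x000306c3 (a Haswell part),
 * this yields family = 0x6 + 0x0 = 6, model = 0xc | 0x30 = 60 and
 * stepping = 3.
 */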
1165
1166 /* CPU class name definitions: */
1167
1168 /* Return the type name for a given CPU model name.
1169 * Caller is responsible for freeing the returned string.
1170 */
1171 static char *x86_cpu_type_name(const char *model_name)
1172 {
1173 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
1174 }
1175
1176 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1177 {
1178 ObjectClass *oc;
1179 char *typename = x86_cpu_type_name(cpu_model);
1180 oc = object_class_by_name(typename);
1181 g_free(typename);
1182 return oc;
1183 }
1184
1185 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1186 {
1187 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1188 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1189 return g_strndup(class_name,
1190 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1191 }
1192
1193 struct X86CPUDefinition {
1194 const char *name;
1195 uint32_t level;
1196 uint32_t xlevel;
1197 /* vendor is a zero-terminated, 12-character ASCII string */
1198 char vendor[CPUID_VENDOR_SZ + 1];
1199 int family;
1200 int model;
1201 int stepping;
1202 FeatureWordArray features;
1203 const char *model_id;
1204 CPUCaches *cache_info;
1205 };
1206
1207 static CPUCaches epyc_cache_info = {
1208 .l1d_cache = &(CPUCacheInfo) {
1209 .type = DCACHE,
1210 .level = 1,
1211 .size = 32 * KiB,
1212 .line_size = 64,
1213 .associativity = 8,
1214 .partitions = 1,
1215 .sets = 64,
1216 .lines_per_tag = 1,
1217 .self_init = 1,
1218 .no_invd_sharing = true,
1219 },
1220 .l1i_cache = &(CPUCacheInfo) {
1221 .type = ICACHE,
1222 .level = 1,
1223 .size = 64 * KiB,
1224 .line_size = 64,
1225 .associativity = 4,
1226 .partitions = 1,
1227 .sets = 256,
1228 .lines_per_tag = 1,
1229 .self_init = 1,
1230 .no_invd_sharing = true,
1231 },
1232 .l2_cache = &(CPUCacheInfo) {
1233 .type = UNIFIED_CACHE,
1234 .level = 2,
1235 .size = 512 * KiB,
1236 .line_size = 64,
1237 .associativity = 8,
1238 .partitions = 1,
1239 .sets = 1024,
1240 .lines_per_tag = 1,
1241 },
1242 .l3_cache = &(CPUCacheInfo) {
1243 .type = UNIFIED_CACHE,
1244 .level = 3,
1245 .size = 8 * MiB,
1246 .line_size = 64,
1247 .associativity = 16,
1248 .partitions = 1,
1249 .sets = 8192,
1250 .lines_per_tag = 1,
1251 .self_init = true,
1252 .inclusive = true,
1253 .complex_indexing = true,
1254 },
1255 };
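
/*
 * Each entry above satisfies the invariant checked by the encoders:
 * size == line_size * associativity * partitions * sets, e.g. the L3
 * cache: 64 * 16 * 1 * 8192 = 8 MiB.
 */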
1256
1257 static X86CPUDefinition builtin_x86_defs[] = {
1258 {
1259 .name = "qemu64",
1260 .level = 0xd,
1261 .vendor = CPUID_VENDOR_AMD,
1262 .family = 6,
1263 .model = 6,
1264 .stepping = 3,
1265 .features[FEAT_1_EDX] =
1266 PPRO_FEATURES |
1267 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1268 CPUID_PSE36,
1269 .features[FEAT_1_ECX] =
1270 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1271 .features[FEAT_8000_0001_EDX] =
1272 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1273 .features[FEAT_8000_0001_ECX] =
1274 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1275 .xlevel = 0x8000000A,
1276 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1277 },
1278 {
1279 .name = "phenom",
1280 .level = 5,
1281 .vendor = CPUID_VENDOR_AMD,
1282 .family = 16,
1283 .model = 2,
1284 .stepping = 3,
1285 /* Missing: CPUID_HT */
1286 .features[FEAT_1_EDX] =
1287 PPRO_FEATURES |
1288 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1289 CPUID_PSE36 | CPUID_VME,
1290 .features[FEAT_1_ECX] =
1291 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1292 CPUID_EXT_POPCNT,
1293 .features[FEAT_8000_0001_EDX] =
1294 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1295 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1296 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1297 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1298 CPUID_EXT3_CR8LEG,
1299 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1300 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1301 .features[FEAT_8000_0001_ECX] =
1302 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1303 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1304 /* Missing: CPUID_SVM_LBRV */
1305 .features[FEAT_SVM] =
1306 CPUID_SVM_NPT,
1307 .xlevel = 0x8000001A,
1308 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1309 },
1310 {
1311 .name = "core2duo",
1312 .level = 10,
1313 .vendor = CPUID_VENDOR_INTEL,
1314 .family = 6,
1315 .model = 15,
1316 .stepping = 11,
1317 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1318 .features[FEAT_1_EDX] =
1319 PPRO_FEATURES |
1320 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1321 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1322 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1323 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1324 .features[FEAT_1_ECX] =
1325 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1326 CPUID_EXT_CX16,
1327 .features[FEAT_8000_0001_EDX] =
1328 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1329 .features[FEAT_8000_0001_ECX] =
1330 CPUID_EXT3_LAHF_LM,
1331 .xlevel = 0x80000008,
1332 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1333 },
1334 {
1335 .name = "kvm64",
1336 .level = 0xd,
1337 .vendor = CPUID_VENDOR_INTEL,
1338 .family = 15,
1339 .model = 6,
1340 .stepping = 1,
1341 /* Missing: CPUID_HT */
1342 .features[FEAT_1_EDX] =
1343 PPRO_FEATURES | CPUID_VME |
1344 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1345 CPUID_PSE36,
1346 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1347 .features[FEAT_1_ECX] =
1348 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1349 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1350 .features[FEAT_8000_0001_EDX] =
1351 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1352 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1353 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1354 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1355 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1356 .features[FEAT_8000_0001_ECX] =
1357 0,
1358 .xlevel = 0x80000008,
1359 .model_id = "Common KVM processor"
1360 },
1361 {
1362 .name = "qemu32",
1363 .level = 4,
1364 .vendor = CPUID_VENDOR_INTEL,
1365 .family = 6,
1366 .model = 6,
1367 .stepping = 3,
1368 .features[FEAT_1_EDX] =
1369 PPRO_FEATURES,
1370 .features[FEAT_1_ECX] =
1371 CPUID_EXT_SSE3,
1372 .xlevel = 0x80000004,
1373 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1374 },
1375 {
1376 .name = "kvm32",
1377 .level = 5,
1378 .vendor = CPUID_VENDOR_INTEL,
1379 .family = 15,
1380 .model = 6,
1381 .stepping = 1,
1382 .features[FEAT_1_EDX] =
1383 PPRO_FEATURES | CPUID_VME |
1384 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1385 .features[FEAT_1_ECX] =
1386 CPUID_EXT_SSE3,
1387 .features[FEAT_8000_0001_ECX] =
1388 0,
1389 .xlevel = 0x80000008,
1390 .model_id = "Common 32-bit KVM processor"
1391 },
1392 {
1393 .name = "coreduo",
1394 .level = 10,
1395 .vendor = CPUID_VENDOR_INTEL,
1396 .family = 6,
1397 .model = 14,
1398 .stepping = 8,
1399 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1400 .features[FEAT_1_EDX] =
1401 PPRO_FEATURES | CPUID_VME |
1402 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1403 CPUID_SS,
1404 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
1405 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1406 .features[FEAT_1_ECX] =
1407 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1408 .features[FEAT_8000_0001_EDX] =
1409 CPUID_EXT2_NX,
1410 .xlevel = 0x80000008,
1411 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1412 },
1413 {
1414 .name = "486",
1415 .level = 1,
1416 .vendor = CPUID_VENDOR_INTEL,
1417 .family = 4,
1418 .model = 8,
1419 .stepping = 0,
1420 .features[FEAT_1_EDX] =
1421 I486_FEATURES,
1422 .xlevel = 0,
1423 .model_id = "",
1424 },
1425 {
1426 .name = "pentium",
1427 .level = 1,
1428 .vendor = CPUID_VENDOR_INTEL,
1429 .family = 5,
1430 .model = 4,
1431 .stepping = 3,
1432 .features[FEAT_1_EDX] =
1433 PENTIUM_FEATURES,
1434 .xlevel = 0,
1435 .model_id = "",
1436 },
1437 {
1438 .name = "pentium2",
1439 .level = 2,
1440 .vendor = CPUID_VENDOR_INTEL,
1441 .family = 6,
1442 .model = 5,
1443 .stepping = 2,
1444 .features[FEAT_1_EDX] =
1445 PENTIUM2_FEATURES,
1446 .xlevel = 0,
1447 .model_id = "",
1448 },
1449 {
1450 .name = "pentium3",
1451 .level = 3,
1452 .vendor = CPUID_VENDOR_INTEL,
1453 .family = 6,
1454 .model = 7,
1455 .stepping = 3,
1456 .features[FEAT_1_EDX] =
1457 PENTIUM3_FEATURES,
1458 .xlevel = 0,
1459 .model_id = "",
1460 },
1461 {
1462 .name = "athlon",
1463 .level = 2,
1464 .vendor = CPUID_VENDOR_AMD,
1465 .family = 6,
1466 .model = 2,
1467 .stepping = 3,
1468 .features[FEAT_1_EDX] =
1469 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1470 CPUID_MCA,
1471 .features[FEAT_8000_0001_EDX] =
1472 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1473 .xlevel = 0x80000008,
1474 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1475 },
1476 {
1477 .name = "n270",
1478 .level = 10,
1479 .vendor = CPUID_VENDOR_INTEL,
1480 .family = 6,
1481 .model = 28,
1482 .stepping = 2,
1483 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1484 .features[FEAT_1_EDX] =
1485 PPRO_FEATURES |
1486 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1487 CPUID_ACPI | CPUID_SS,
1488 /* Some CPUs have no CPUID_SEP */
1489 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1490 * CPUID_EXT_XTPR */
1491 .features[FEAT_1_ECX] =
1492 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1493 CPUID_EXT_MOVBE,
1494 .features[FEAT_8000_0001_EDX] =
1495 CPUID_EXT2_NX,
1496 .features[FEAT_8000_0001_ECX] =
1497 CPUID_EXT3_LAHF_LM,
1498 .xlevel = 0x80000008,
1499 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1500 },
1501 {
1502 .name = "Conroe",
1503 .level = 10,
1504 .vendor = CPUID_VENDOR_INTEL,
1505 .family = 6,
1506 .model = 15,
1507 .stepping = 3,
1508 .features[FEAT_1_EDX] =
1509 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1510 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1511 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1512 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1513 CPUID_DE | CPUID_FP87,
1514 .features[FEAT_1_ECX] =
1515 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1516 .features[FEAT_8000_0001_EDX] =
1517 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1518 .features[FEAT_8000_0001_ECX] =
1519 CPUID_EXT3_LAHF_LM,
1520 .xlevel = 0x80000008,
1521 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1522 },
1523 {
1524 .name = "Penryn",
1525 .level = 10,
1526 .vendor = CPUID_VENDOR_INTEL,
1527 .family = 6,
1528 .model = 23,
1529 .stepping = 3,
1530 .features[FEAT_1_EDX] =
1531 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1532 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1533 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1534 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1535 CPUID_DE | CPUID_FP87,
1536 .features[FEAT_1_ECX] =
1537 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1538 CPUID_EXT_SSE3,
1539 .features[FEAT_8000_0001_EDX] =
1540 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1541 .features[FEAT_8000_0001_ECX] =
1542 CPUID_EXT3_LAHF_LM,
1543 .xlevel = 0x80000008,
1544 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1545 },
1546 {
1547 .name = "Nehalem",
1548 .level = 11,
1549 .vendor = CPUID_VENDOR_INTEL,
1550 .family = 6,
1551 .model = 26,
1552 .stepping = 3,
1553 .features[FEAT_1_EDX] =
1554 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1555 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1556 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1557 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1558 CPUID_DE | CPUID_FP87,
1559 .features[FEAT_1_ECX] =
1560 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1561 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1562 .features[FEAT_8000_0001_EDX] =
1563 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1564 .features[FEAT_8000_0001_ECX] =
1565 CPUID_EXT3_LAHF_LM,
1566 .xlevel = 0x80000008,
1567 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1568 },
1569 {
1570 .name = "Nehalem-IBRS",
1571 .level = 11,
1572 .vendor = CPUID_VENDOR_INTEL,
1573 .family = 6,
1574 .model = 26,
1575 .stepping = 3,
1576 .features[FEAT_1_EDX] =
1577 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1578 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1579 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1580 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1581 CPUID_DE | CPUID_FP87,
1582 .features[FEAT_1_ECX] =
1583 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1584 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1585 .features[FEAT_7_0_EDX] =
1586 CPUID_7_0_EDX_SPEC_CTRL,
1587 .features[FEAT_8000_0001_EDX] =
1588 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1589 .features[FEAT_8000_0001_ECX] =
1590 CPUID_EXT3_LAHF_LM,
1591 .xlevel = 0x80000008,
1592 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1593 },
1594 {
1595 .name = "Westmere",
1596 .level = 11,
1597 .vendor = CPUID_VENDOR_INTEL,
1598 .family = 6,
1599 .model = 44,
1600 .stepping = 1,
1601 .features[FEAT_1_EDX] =
1602 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1603 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1604 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1605 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1606 CPUID_DE | CPUID_FP87,
1607 .features[FEAT_1_ECX] =
1608 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1609 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1610 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1611 .features[FEAT_8000_0001_EDX] =
1612 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1613 .features[FEAT_8000_0001_ECX] =
1614 CPUID_EXT3_LAHF_LM,
1615 .features[FEAT_6_EAX] =
1616 CPUID_6_EAX_ARAT,
1617 .xlevel = 0x80000008,
1618 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1619 },
1620 {
1621 .name = "Westmere-IBRS",
1622 .level = 11,
1623 .vendor = CPUID_VENDOR_INTEL,
1624 .family = 6,
1625 .model = 44,
1626 .stepping = 1,
1627 .features[FEAT_1_EDX] =
1628 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1629 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1630 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1631 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1632 CPUID_DE | CPUID_FP87,
1633 .features[FEAT_1_ECX] =
1634 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1635 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1636 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1637 .features[FEAT_8000_0001_EDX] =
1638 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1639 .features[FEAT_8000_0001_ECX] =
1640 CPUID_EXT3_LAHF_LM,
1641 .features[FEAT_7_0_EDX] =
1642 CPUID_7_0_EDX_SPEC_CTRL,
1643 .features[FEAT_6_EAX] =
1644 CPUID_6_EAX_ARAT,
1645 .xlevel = 0x80000008,
1646 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1647 },
1648 {
1649 .name = "SandyBridge",
1650 .level = 0xd,
1651 .vendor = CPUID_VENDOR_INTEL,
1652 .family = 6,
1653 .model = 42,
1654 .stepping = 1,
1655 .features[FEAT_1_EDX] =
1656 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1657 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1658 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1659 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1660 CPUID_DE | CPUID_FP87,
1661 .features[FEAT_1_ECX] =
1662 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1663 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1664 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1665 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1666 CPUID_EXT_SSE3,
1667 .features[FEAT_8000_0001_EDX] =
1668 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1669 CPUID_EXT2_SYSCALL,
1670 .features[FEAT_8000_0001_ECX] =
1671 CPUID_EXT3_LAHF_LM,
1672 .features[FEAT_XSAVE] =
1673 CPUID_XSAVE_XSAVEOPT,
1674 .features[FEAT_6_EAX] =
1675 CPUID_6_EAX_ARAT,
1676 .xlevel = 0x80000008,
1677 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1678 },
1679 {
1680 .name = "SandyBridge-IBRS",
1681 .level = 0xd,
1682 .vendor = CPUID_VENDOR_INTEL,
1683 .family = 6,
1684 .model = 42,
1685 .stepping = 1,
1686 .features[FEAT_1_EDX] =
1687 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1688 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1689 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1690 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1691 CPUID_DE | CPUID_FP87,
1692 .features[FEAT_1_ECX] =
1693 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1694 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1695 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1696 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1697 CPUID_EXT_SSE3,
1698 .features[FEAT_8000_0001_EDX] =
1699 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1700 CPUID_EXT2_SYSCALL,
1701 .features[FEAT_8000_0001_ECX] =
1702 CPUID_EXT3_LAHF_LM,
1703 .features[FEAT_7_0_EDX] =
1704 CPUID_7_0_EDX_SPEC_CTRL,
1705 .features[FEAT_XSAVE] =
1706 CPUID_XSAVE_XSAVEOPT,
1707 .features[FEAT_6_EAX] =
1708 CPUID_6_EAX_ARAT,
1709 .xlevel = 0x80000008,
1710 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1711 },
1712 {
1713 .name = "IvyBridge",
1714 .level = 0xd,
1715 .vendor = CPUID_VENDOR_INTEL,
1716 .family = 6,
1717 .model = 58,
1718 .stepping = 9,
1719 .features[FEAT_1_EDX] =
1720 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1721 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1722 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1723 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1724 CPUID_DE | CPUID_FP87,
1725 .features[FEAT_1_ECX] =
1726 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1727 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1728 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1729 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1730 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1731 .features[FEAT_7_0_EBX] =
1732 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1733 CPUID_7_0_EBX_ERMS,
1734 .features[FEAT_8000_0001_EDX] =
1735 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1736 CPUID_EXT2_SYSCALL,
1737 .features[FEAT_8000_0001_ECX] =
1738 CPUID_EXT3_LAHF_LM,
1739 .features[FEAT_XSAVE] =
1740 CPUID_XSAVE_XSAVEOPT,
1741 .features[FEAT_6_EAX] =
1742 CPUID_6_EAX_ARAT,
1743 .xlevel = 0x80000008,
1744 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1745 },
1746 {
1747 .name = "IvyBridge-IBRS",
1748 .level = 0xd,
1749 .vendor = CPUID_VENDOR_INTEL,
1750 .family = 6,
1751 .model = 58,
1752 .stepping = 9,
1753 .features[FEAT_1_EDX] =
1754 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1755 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1756 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1757 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1758 CPUID_DE | CPUID_FP87,
1759 .features[FEAT_1_ECX] =
1760 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1761 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1762 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1763 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1764 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1765 .features[FEAT_7_0_EBX] =
1766 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1767 CPUID_7_0_EBX_ERMS,
1768 .features[FEAT_8000_0001_EDX] =
1769 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1770 CPUID_EXT2_SYSCALL,
1771 .features[FEAT_8000_0001_ECX] =
1772 CPUID_EXT3_LAHF_LM,
1773 .features[FEAT_7_0_EDX] =
1774 CPUID_7_0_EDX_SPEC_CTRL,
1775 .features[FEAT_XSAVE] =
1776 CPUID_XSAVE_XSAVEOPT,
1777 .features[FEAT_6_EAX] =
1778 CPUID_6_EAX_ARAT,
1779 .xlevel = 0x80000008,
1780 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1781 },
1782 {
1783 .name = "Haswell-noTSX",
1784 .level = 0xd,
1785 .vendor = CPUID_VENDOR_INTEL,
1786 .family = 6,
1787 .model = 60,
1788 .stepping = 1,
1789 .features[FEAT_1_EDX] =
1790 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1791 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1792 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1793 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1794 CPUID_DE | CPUID_FP87,
1795 .features[FEAT_1_ECX] =
1796 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1797 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1798 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1799 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1800 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1801 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1802 .features[FEAT_8000_0001_EDX] =
1803 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1804 CPUID_EXT2_SYSCALL,
1805 .features[FEAT_8000_0001_ECX] =
1806 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1807 .features[FEAT_7_0_EBX] =
1808 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1809 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1810 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1811 .features[FEAT_XSAVE] =
1812 CPUID_XSAVE_XSAVEOPT,
1813 .features[FEAT_6_EAX] =
1814 CPUID_6_EAX_ARAT,
1815 .xlevel = 0x80000008,
1816 .model_id = "Intel Core Processor (Haswell, no TSX)",
1817 },
1818 {
1819 .name = "Haswell-noTSX-IBRS",
1820 .level = 0xd,
1821 .vendor = CPUID_VENDOR_INTEL,
1822 .family = 6,
1823 .model = 60,
1824 .stepping = 1,
1825 .features[FEAT_1_EDX] =
1826 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1827 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1828 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1829 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1830 CPUID_DE | CPUID_FP87,
1831 .features[FEAT_1_ECX] =
1832 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1833 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1834 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1835 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1836 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1837 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1838 .features[FEAT_8000_0001_EDX] =
1839 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1840 CPUID_EXT2_SYSCALL,
1841 .features[FEAT_8000_0001_ECX] =
1842 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1843 .features[FEAT_7_0_EDX] =
1844 CPUID_7_0_EDX_SPEC_CTRL,
1845 .features[FEAT_7_0_EBX] =
1846 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1847 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1848 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1849 .features[FEAT_XSAVE] =
1850 CPUID_XSAVE_XSAVEOPT,
1851 .features[FEAT_6_EAX] =
1852 CPUID_6_EAX_ARAT,
1853 .xlevel = 0x80000008,
1854 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1855 },
1856 {
1857 .name = "Haswell",
1858 .level = 0xd,
1859 .vendor = CPUID_VENDOR_INTEL,
1860 .family = 6,
1861 .model = 60,
1862 .stepping = 4,
1863 .features[FEAT_1_EDX] =
1864 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1865 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1866 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1867 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1868 CPUID_DE | CPUID_FP87,
1869 .features[FEAT_1_ECX] =
1870 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1871 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1872 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1873 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1874 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1875 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1876 .features[FEAT_8000_0001_EDX] =
1877 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1878 CPUID_EXT2_SYSCALL,
1879 .features[FEAT_8000_0001_ECX] =
1880 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1881 .features[FEAT_7_0_EBX] =
1882 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1883 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1884 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1885 CPUID_7_0_EBX_RTM,
1886 .features[FEAT_XSAVE] =
1887 CPUID_XSAVE_XSAVEOPT,
1888 .features[FEAT_6_EAX] =
1889 CPUID_6_EAX_ARAT,
1890 .xlevel = 0x80000008,
1891 .model_id = "Intel Core Processor (Haswell)",
1892 },
1893 {
1894 .name = "Haswell-IBRS",
1895 .level = 0xd,
1896 .vendor = CPUID_VENDOR_INTEL,
1897 .family = 6,
1898 .model = 60,
1899 .stepping = 4,
1900 .features[FEAT_1_EDX] =
1901 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1902 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1903 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1904 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1905 CPUID_DE | CPUID_FP87,
1906 .features[FEAT_1_ECX] =
1907 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1908 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1909 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1910 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1911 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1912 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1913 .features[FEAT_8000_0001_EDX] =
1914 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1915 CPUID_EXT2_SYSCALL,
1916 .features[FEAT_8000_0001_ECX] =
1917 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1918 .features[FEAT_7_0_EDX] =
1919 CPUID_7_0_EDX_SPEC_CTRL,
1920 .features[FEAT_7_0_EBX] =
1921 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1922 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1923 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1924 CPUID_7_0_EBX_RTM,
1925 .features[FEAT_XSAVE] =
1926 CPUID_XSAVE_XSAVEOPT,
1927 .features[FEAT_6_EAX] =
1928 CPUID_6_EAX_ARAT,
1929 .xlevel = 0x80000008,
1930 .model_id = "Intel Core Processor (Haswell, IBRS)",
1931 },
1932 {
1933 .name = "Broadwell-noTSX",
1934 .level = 0xd,
1935 .vendor = CPUID_VENDOR_INTEL,
1936 .family = 6,
1937 .model = 61,
1938 .stepping = 2,
1939 .features[FEAT_1_EDX] =
1940 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1941 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1942 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1943 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1944 CPUID_DE | CPUID_FP87,
1945 .features[FEAT_1_ECX] =
1946 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1947 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1948 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1949 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1950 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1951 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1952 .features[FEAT_8000_0001_EDX] =
1953 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1954 CPUID_EXT2_SYSCALL,
1955 .features[FEAT_8000_0001_ECX] =
1956 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1957 .features[FEAT_7_0_EBX] =
1958 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1959 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1960 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1961 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1962 CPUID_7_0_EBX_SMAP,
1963 .features[FEAT_XSAVE] =
1964 CPUID_XSAVE_XSAVEOPT,
1965 .features[FEAT_6_EAX] =
1966 CPUID_6_EAX_ARAT,
1967 .xlevel = 0x80000008,
1968 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1969 },
1970 {
1971 .name = "Broadwell-noTSX-IBRS",
1972 .level = 0xd,
1973 .vendor = CPUID_VENDOR_INTEL,
1974 .family = 6,
1975 .model = 61,
1976 .stepping = 2,
1977 .features[FEAT_1_EDX] =
1978 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1979 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1980 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1981 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1982 CPUID_DE | CPUID_FP87,
1983 .features[FEAT_1_ECX] =
1984 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1985 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1986 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1987 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1988 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1989 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1990 .features[FEAT_8000_0001_EDX] =
1991 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1992 CPUID_EXT2_SYSCALL,
1993 .features[FEAT_8000_0001_ECX] =
1994 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1995 .features[FEAT_7_0_EDX] =
1996 CPUID_7_0_EDX_SPEC_CTRL,
1997 .features[FEAT_7_0_EBX] =
1998 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1999 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2000 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2001 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2002 CPUID_7_0_EBX_SMAP,
2003 .features[FEAT_XSAVE] =
2004 CPUID_XSAVE_XSAVEOPT,
2005 .features[FEAT_6_EAX] =
2006 CPUID_6_EAX_ARAT,
2007 .xlevel = 0x80000008,
2008 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
2009 },
2010 {
2011 .name = "Broadwell",
2012 .level = 0xd,
2013 .vendor = CPUID_VENDOR_INTEL,
2014 .family = 6,
2015 .model = 61,
2016 .stepping = 2,
2017 .features[FEAT_1_EDX] =
2018 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2019 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2020 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2021 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2022 CPUID_DE | CPUID_FP87,
2023 .features[FEAT_1_ECX] =
2024 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2025 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2026 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2027 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2028 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2029 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2030 .features[FEAT_8000_0001_EDX] =
2031 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2032 CPUID_EXT2_SYSCALL,
2033 .features[FEAT_8000_0001_ECX] =
2034 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2035 .features[FEAT_7_0_EBX] =
2036 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2037 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2038 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2039 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2040 CPUID_7_0_EBX_SMAP,
2041 .features[FEAT_XSAVE] =
2042 CPUID_XSAVE_XSAVEOPT,
2043 .features[FEAT_6_EAX] =
2044 CPUID_6_EAX_ARAT,
2045 .xlevel = 0x80000008,
2046 .model_id = "Intel Core Processor (Broadwell)",
2047 },
2048 {
2049 .name = "Broadwell-IBRS",
2050 .level = 0xd,
2051 .vendor = CPUID_VENDOR_INTEL,
2052 .family = 6,
2053 .model = 61,
2054 .stepping = 2,
2055 .features[FEAT_1_EDX] =
2056 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2057 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2058 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2059 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2060 CPUID_DE | CPUID_FP87,
2061 .features[FEAT_1_ECX] =
2062 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2063 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2064 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2065 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2066 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2067 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2068 .features[FEAT_8000_0001_EDX] =
2069 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2070 CPUID_EXT2_SYSCALL,
2071 .features[FEAT_8000_0001_ECX] =
2072 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2073 .features[FEAT_7_0_EDX] =
2074 CPUID_7_0_EDX_SPEC_CTRL,
2075 .features[FEAT_7_0_EBX] =
2076 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2077 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2078 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2079 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2080 CPUID_7_0_EBX_SMAP,
2081 .features[FEAT_XSAVE] =
2082 CPUID_XSAVE_XSAVEOPT,
2083 .features[FEAT_6_EAX] =
2084 CPUID_6_EAX_ARAT,
2085 .xlevel = 0x80000008,
2086 .model_id = "Intel Core Processor (Broadwell, IBRS)",
2087 },
2088 {
2089 .name = "Skylake-Client",
2090 .level = 0xd,
2091 .vendor = CPUID_VENDOR_INTEL,
2092 .family = 6,
2093 .model = 94,
2094 .stepping = 3,
2095 .features[FEAT_1_EDX] =
2096 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2097 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2098 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2099 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2100 CPUID_DE | CPUID_FP87,
2101 .features[FEAT_1_ECX] =
2102 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2103 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2104 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2105 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2106 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2107 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2108 .features[FEAT_8000_0001_EDX] =
2109 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2110 CPUID_EXT2_SYSCALL,
2111 .features[FEAT_8000_0001_ECX] =
2112 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2113 .features[FEAT_7_0_EBX] =
2114 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2115 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2116 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2117 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2118 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2119 /* Missing: XSAVES (not supported by some Linux versions,
2120 * including v4.1 to v4.12).
2121 * KVM doesn't yet expose any XSAVES state save component,
2122 * and the only one defined in Skylake (processor tracing)
2123 * probably will block migration anyway.
2124 */
2125 .features[FEAT_XSAVE] =
2126 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2127 CPUID_XSAVE_XGETBV1,
2128 .features[FEAT_6_EAX] =
2129 CPUID_6_EAX_ARAT,
2130 .xlevel = 0x80000008,
2131 .model_id = "Intel Core Processor (Skylake)",
2132 },
2133 {
2134 .name = "Skylake-Client-IBRS",
2135 .level = 0xd,
2136 .vendor = CPUID_VENDOR_INTEL,
2137 .family = 6,
2138 .model = 94,
2139 .stepping = 3,
2140 .features[FEAT_1_EDX] =
2141 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2142 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2143 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2144 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2145 CPUID_DE | CPUID_FP87,
2146 .features[FEAT_1_ECX] =
2147 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2148 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2149 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2150 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2151 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2152 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2153 .features[FEAT_8000_0001_EDX] =
2154 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2155 CPUID_EXT2_SYSCALL,
2156 .features[FEAT_8000_0001_ECX] =
2157 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2158 .features[FEAT_7_0_EDX] =
2159 CPUID_7_0_EDX_SPEC_CTRL,
2160 .features[FEAT_7_0_EBX] =
2161 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2162 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2163 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2164 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2165 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2166 /* Missing: XSAVES (not supported by some Linux versions,
2167 * including v4.1 to v4.12).
2168 * KVM doesn't yet expose any XSAVES state save component,
2169 * and the only one defined in Skylake (processor tracing)
2170 * probably will block migration anyway.
2171 */
2172 .features[FEAT_XSAVE] =
2173 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2174 CPUID_XSAVE_XGETBV1,
2175 .features[FEAT_6_EAX] =
2176 CPUID_6_EAX_ARAT,
2177 .xlevel = 0x80000008,
2178 .model_id = "Intel Core Processor (Skylake, IBRS)",
2179 },
2180 {
2181 .name = "Skylake-Server",
2182 .level = 0xd,
2183 .vendor = CPUID_VENDOR_INTEL,
2184 .family = 6,
2185 .model = 85,
2186 .stepping = 4,
2187 .features[FEAT_1_EDX] =
2188 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2189 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2190 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2191 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2192 CPUID_DE | CPUID_FP87,
2193 .features[FEAT_1_ECX] =
2194 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2195 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2196 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2197 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2198 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2199 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2200 .features[FEAT_8000_0001_EDX] =
2201 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2202 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2203 .features[FEAT_8000_0001_ECX] =
2204 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2205 .features[FEAT_7_0_EBX] =
2206 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2207 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2208 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2209 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2210 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2211 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2212 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2213 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2214 /* Missing: XSAVES (not supported by some Linux versions,
2215 * including v4.1 to v4.12).
2216 * KVM doesn't yet expose any XSAVES state save component,
2217 * and the only one defined in Skylake (processor tracing)
2218 * probably will block migration anyway.
2219 */
2220 .features[FEAT_XSAVE] =
2221 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2222 CPUID_XSAVE_XGETBV1,
2223 .features[FEAT_6_EAX] =
2224 CPUID_6_EAX_ARAT,
2225 .xlevel = 0x80000008,
2226 .model_id = "Intel Xeon Processor (Skylake)",
2227 },
2228 {
2229 .name = "Skylake-Server-IBRS",
2230 .level = 0xd,
2231 .vendor = CPUID_VENDOR_INTEL,
2232 .family = 6,
2233 .model = 85,
2234 .stepping = 4,
2235 .features[FEAT_1_EDX] =
2236 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2237 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2238 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2239 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2240 CPUID_DE | CPUID_FP87,
2241 .features[FEAT_1_ECX] =
2242 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2243 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2244 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2245 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2246 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2247 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2248 .features[FEAT_8000_0001_EDX] =
2249 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2250 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2251 .features[FEAT_8000_0001_ECX] =
2252 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2253 .features[FEAT_7_0_EDX] =
2254 CPUID_7_0_EDX_SPEC_CTRL,
2255 .features[FEAT_7_0_EBX] =
2256 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2257 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2258 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2259 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2260 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2261 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2262 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2263 CPUID_7_0_EBX_AVX512VL,
2264 /* Missing: XSAVES (not supported by some Linux versions,
2265 * including v4.1 to v4.12).
2266 * KVM doesn't yet expose any XSAVES state save component,
2267 * and the only one defined in Skylake (processor tracing)
2268 * probably will block migration anyway.
2269 */
2270 .features[FEAT_XSAVE] =
2271 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2272 CPUID_XSAVE_XGETBV1,
2273 .features[FEAT_6_EAX] =
2274 CPUID_6_EAX_ARAT,
2275 .xlevel = 0x80000008,
2276 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2277 },
2278 {
2279 .name = "KnightsMill",
2280 .level = 0xd,
2281 .vendor = CPUID_VENDOR_INTEL,
2282 .family = 6,
2283 .model = 133,
2284 .stepping = 0,
2285 .features[FEAT_1_EDX] =
2286 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2287 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2288 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2289 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2290 CPUID_PSE | CPUID_DE | CPUID_FP87,
2291 .features[FEAT_1_ECX] =
2292 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2293 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2294 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2295 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2296 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2297 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2298 .features[FEAT_8000_0001_EDX] =
2299 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2300 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2301 .features[FEAT_8000_0001_ECX] =
2302 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2303 .features[FEAT_7_0_EBX] =
2304 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2305 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2306 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2307 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2308 CPUID_7_0_EBX_AVX512ER,
2309 .features[FEAT_7_0_ECX] =
2310 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2311 .features[FEAT_7_0_EDX] =
2312 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2313 .features[FEAT_XSAVE] =
2314 CPUID_XSAVE_XSAVEOPT,
2315 .features[FEAT_6_EAX] =
2316 CPUID_6_EAX_ARAT,
2317 .xlevel = 0x80000008,
2318 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2319 },
2320 {
2321 .name = "Opteron_G1",
2322 .level = 5,
2323 .vendor = CPUID_VENDOR_AMD,
2324 .family = 15,
2325 .model = 6,
2326 .stepping = 1,
2327 .features[FEAT_1_EDX] =
2328 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2329 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2330 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2331 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2332 CPUID_DE | CPUID_FP87,
2333 .features[FEAT_1_ECX] =
2334 CPUID_EXT_SSE3,
2335 .features[FEAT_8000_0001_EDX] =
2336 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2337 .xlevel = 0x80000008,
2338 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2339 },
2340 {
2341 .name = "Opteron_G2",
2342 .level = 5,
2343 .vendor = CPUID_VENDOR_AMD,
2344 .family = 15,
2345 .model = 6,
2346 .stepping = 1,
2347 .features[FEAT_1_EDX] =
2348 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2349 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2350 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2351 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2352 CPUID_DE | CPUID_FP87,
2353 .features[FEAT_1_ECX] =
2354 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2355 /* Missing: CPUID_EXT2_RDTSCP */
2356 .features[FEAT_8000_0001_EDX] =
2357 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2358 .features[FEAT_8000_0001_ECX] =
2359 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2360 .xlevel = 0x80000008,
2361 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2362 },
2363 {
2364 .name = "Opteron_G3",
2365 .level = 5,
2366 .vendor = CPUID_VENDOR_AMD,
2367 .family = 16,
2368 .model = 2,
2369 .stepping = 3,
2370 .features[FEAT_1_EDX] =
2371 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2372 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2373 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2374 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2375 CPUID_DE | CPUID_FP87,
2376 .features[FEAT_1_ECX] =
2377 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2378 CPUID_EXT_SSE3,
2379 /* Missing: CPUID_EXT2_RDTSCP */
2380 .features[FEAT_8000_0001_EDX] =
2381 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2382 .features[FEAT_8000_0001_ECX] =
2383 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2384 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2385 .xlevel = 0x80000008,
2386 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2387 },
2388 {
2389 .name = "Opteron_G4",
2390 .level = 0xd,
2391 .vendor = CPUID_VENDOR_AMD,
2392 .family = 21,
2393 .model = 1,
2394 .stepping = 2,
2395 .features[FEAT_1_EDX] =
2396 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2397 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2398 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2399 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2400 CPUID_DE | CPUID_FP87,
2401 .features[FEAT_1_ECX] =
2402 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2403 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2404 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2405 CPUID_EXT_SSE3,
2406 /* Missing: CPUID_EXT2_RDTSCP */
2407 .features[FEAT_8000_0001_EDX] =
2408 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2409 CPUID_EXT2_SYSCALL,
2410 .features[FEAT_8000_0001_ECX] =
2411 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2412 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2413 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2414 CPUID_EXT3_LAHF_LM,
2415 /* no xsaveopt! */
2416 .xlevel = 0x8000001A,
2417 .model_id = "AMD Opteron 62xx class CPU",
2418 },
2419 {
2420 .name = "Opteron_G5",
2421 .level = 0xd,
2422 .vendor = CPUID_VENDOR_AMD,
2423 .family = 21,
2424 .model = 2,
2425 .stepping = 0,
2426 .features[FEAT_1_EDX] =
2427 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2428 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2429 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2430 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2431 CPUID_DE | CPUID_FP87,
2432 .features[FEAT_1_ECX] =
2433 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2434 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2435 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2436 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2437 /* Missing: CPUID_EXT2_RDTSCP */
2438 .features[FEAT_8000_0001_EDX] =
2439 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2440 CPUID_EXT2_SYSCALL,
2441 .features[FEAT_8000_0001_ECX] =
2442 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2443 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2444 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2445 CPUID_EXT3_LAHF_LM,
2446 /* no xsaveopt! */
2447 .xlevel = 0x8000001A,
2448 .model_id = "AMD Opteron 63xx class CPU",
2449 },
2450 {
2451 .name = "EPYC",
2452 .level = 0xd,
2453 .vendor = CPUID_VENDOR_AMD,
2454 .family = 23,
2455 .model = 1,
2456 .stepping = 2,
2457 .features[FEAT_1_EDX] =
2458 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2459 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2460 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2461 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2462 CPUID_VME | CPUID_FP87,
2463 .features[FEAT_1_ECX] =
2464 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2465 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2466 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2467 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2468 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2469 .features[FEAT_8000_0001_EDX] =
2470 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2471 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2472 CPUID_EXT2_SYSCALL,
2473 .features[FEAT_8000_0001_ECX] =
2474 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2475 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2476 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2477 .features[FEAT_7_0_EBX] =
2478 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2479 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2480 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2481 CPUID_7_0_EBX_SHA_NI,
2482 /* Missing: XSAVES (not supported by some Linux versions,
2483 * including v4.1 to v4.12).
2484 * KVM doesn't yet expose any XSAVES state save component.
2485 */
2486 .features[FEAT_XSAVE] =
2487 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2488 CPUID_XSAVE_XGETBV1,
2489 .features[FEAT_6_EAX] =
2490 CPUID_6_EAX_ARAT,
2491 .xlevel = 0x8000000A,
2492 .model_id = "AMD EPYC Processor",
2493 .cache_info = &epyc_cache_info,
2494 },
2495 {
2496 .name = "EPYC-IBPB",
2497 .level = 0xd,
2498 .vendor = CPUID_VENDOR_AMD,
2499 .family = 23,
2500 .model = 1,
2501 .stepping = 2,
2502 .features[FEAT_1_EDX] =
2503 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2504 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2505 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2506 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2507 CPUID_VME | CPUID_FP87,
2508 .features[FEAT_1_ECX] =
2509 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2510 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2511 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2512 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2513 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2514 .features[FEAT_8000_0001_EDX] =
2515 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2516 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2517 CPUID_EXT2_SYSCALL,
2518 .features[FEAT_8000_0001_ECX] =
2519 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2520 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2521 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2522 .features[FEAT_8000_0008_EBX] =
2523 CPUID_8000_0008_EBX_IBPB,
2524 .features[FEAT_7_0_EBX] =
2525 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2526 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2527 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2528 CPUID_7_0_EBX_SHA_NI,
2529 /* Missing: XSAVES (not supported by some Linux versions,
2530 * including v4.1 to v4.12).
2531 * KVM doesn't yet expose any XSAVES state save component.
2532 */
2533 .features[FEAT_XSAVE] =
2534 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2535 CPUID_XSAVE_XGETBV1,
2536 .features[FEAT_6_EAX] =
2537 CPUID_6_EAX_ARAT,
2538 .xlevel = 0x8000000A,
2539 .model_id = "AMD EPYC Processor (with IBPB)",
2540 .cache_info = &epyc_cache_info,
2541 },
2542 };
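/*
 * Illustrative note (not part of the original source): each X86CPUDefinition
 * above is registered as a selectable CPU model, e.g. "-cpu EPYC-IBPB" on the
 * qemu-system-x86_64 command line.  The *-IBRS/*-IBPB variants add the
 * speculation-control bits (CPUID_7_0_EDX_SPEC_CTRL or
 * CPUID_8000_0008_EBX_IBPB) on top of their base models.
 */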
2543
2544 typedef struct PropValue {
2545 const char *prop, *value;
2546 } PropValue;
2547
2548 /* KVM-specific features that are automatically added/removed
2549 * from all CPU models when KVM is enabled.
2550 */
2551 static PropValue kvm_default_props[] = {
2552 { "kvmclock", "on" },
2553 { "kvm-nopiodelay", "on" },
2554 { "kvm-asyncpf", "on" },
2555 { "kvm-steal-time", "on" },
2556 { "kvm-pv-eoi", "on" },
2557 { "kvmclock-stable-bit", "on" },
2558 { "x2apic", "on" },
2559 { "acpi", "off" },
2560 { "monitor", "off" },
2561 { "svm", "off" },
2562 { NULL, NULL },
2563 };
2564
2565 /* TCG-specific defaults that override all CPU models when using TCG
2566 */
2567 static PropValue tcg_default_props[] = {
2568 { "vme", "off" },
2569 { NULL, NULL },
2570 };
2571
2572
2573 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2574 {
2575 PropValue *pv;
2576 for (pv = kvm_default_props; pv->prop; pv++) {
2577 if (!strcmp(pv->prop, prop)) {
2578 pv->value = value;
2579 break;
2580 }
2581 }
2582
2583 /* It is valid to call this function only for properties that
2584 * are already present in the kvm_default_props table.
2585 */
2586 assert(pv->prop);
2587 }
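/*
 * Usage sketch (hypothetical caller, not taken from this file): machine or
 * compat code may override one of the defaults above before CPUs are
 * realized, e.g.:
 *
 *     x86_cpu_change_kvm_default("kvm-pv-eoi", "off");
 *
 * A NULL value makes x86_cpu_apply_props() skip the property entirely, and
 * naming a property that is not in kvm_default_props trips the assert above.
 */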
2588
2589 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2590 bool migratable_only);
2591
2592 static bool lmce_supported(void)
2593 {
2594 uint64_t mce_cap = 0;
2595
2596 #ifdef CONFIG_KVM
2597 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2598 return false;
2599 }
2600 #endif
2601
2602 return !!(mce_cap & MCG_LMCE_P);
2603 }
2604
2605 #define CPUID_MODEL_ID_SZ 48
2606
2607 /**
2608 * cpu_x86_fill_model_id:
2609 * Get CPUID model ID string from host CPU.
2610 *
2611 * @str should have at least CPUID_MODEL_ID_SZ bytes
2612 *
2613 * The function does NOT add a null terminator to the string
2614 * automatically.
2615 */
2616 static int cpu_x86_fill_model_id(char *str)
2617 {
2618 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2619 int i;
2620
2621 for (i = 0; i < 3; i++) {
2622 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2623 memcpy(str + i * 16 + 0, &eax, 4);
2624 memcpy(str + i * 16 + 4, &ebx, 4);
2625 memcpy(str + i * 16 + 8, &ecx, 4);
2626 memcpy(str + i * 16 + 12, &edx, 4);
2627 }
2628 return 0;
2629 }
2630
2631 static Property max_x86_cpu_properties[] = {
2632 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2633 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2634 DEFINE_PROP_END_OF_LIST()
2635 };
2636
2637 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2638 {
2639 DeviceClass *dc = DEVICE_CLASS(oc);
2640 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2641
2642 xcc->ordering = 9;
2643
2644 xcc->model_description =
2645 "Enables all features supported by the accelerator in the current host";
2646
2647 dc->props = max_x86_cpu_properties;
2648 }
2649
2650 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2651
2652 static void max_x86_cpu_initfn(Object *obj)
2653 {
2654 X86CPU *cpu = X86_CPU(obj);
2655 CPUX86State *env = &cpu->env;
2656 KVMState *s = kvm_state;
2657
2658 /* We can't fill the features array here because we don't know yet if
2659 * "migratable" is true or false.
2660 */
2661 cpu->max_features = true;
2662
2663 if (accel_uses_host_cpuid()) {
2664 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2665 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2666 int family, model, stepping;
2667 X86CPUDefinition host_cpudef = { };
2668 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2669
2670 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2671 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2672
2673 host_vendor_fms(vendor, &family, &model, &stepping);
2674
2675 cpu_x86_fill_model_id(model_id);
2676
2677 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2678 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2679 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2680 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2681 &error_abort);
2682 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2683 &error_abort);
2684
2685 if (kvm_enabled()) {
2686 env->cpuid_min_level =
2687 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2688 env->cpuid_min_xlevel =
2689 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2690 env->cpuid_min_xlevel2 =
2691 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2692 } else {
2693 env->cpuid_min_level =
2694 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2695 env->cpuid_min_xlevel =
2696 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2697 env->cpuid_min_xlevel2 =
2698 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2699 }
2700
2701 if (lmce_supported()) {
2702 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2703 }
2704 } else {
2705 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2706 "vendor", &error_abort);
2707 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2708 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2709 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2710 object_property_set_str(OBJECT(cpu),
2711 "QEMU TCG CPU version " QEMU_HW_VERSION,
2712 "model-id", &error_abort);
2713 }
2714
2715 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
2716 }
2717
2718 static const TypeInfo max_x86_cpu_type_info = {
2719 .name = X86_CPU_TYPE_NAME("max"),
2720 .parent = TYPE_X86_CPU,
2721 .instance_init = max_x86_cpu_initfn,
2722 .class_init = max_x86_cpu_class_init,
2723 };
2724
2725 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2726 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2727 {
2728 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2729
2730 xcc->host_cpuid_required = true;
2731 xcc->ordering = 8;
2732
2733 if (kvm_enabled()) {
2734 xcc->model_description =
2735 "KVM processor with all supported host features ";
2736 } else if (hvf_enabled()) {
2737 xcc->model_description =
2738 "HVF processor with all supported host features ";
2739 }
2740 }
2741
2742 static const TypeInfo host_x86_cpu_type_info = {
2743 .name = X86_CPU_TYPE_NAME("host"),
2744 .parent = X86_CPU_TYPE_NAME("max"),
2745 .class_init = host_x86_cpu_class_init,
2746 };
2747
2748 #endif
2749
2750 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2751 {
2752 FeatureWordInfo *f = &feature_word_info[w];
2753 int i;
2754
2755 for (i = 0; i < 32; ++i) {
2756 if ((1UL << i) & mask) {
2757 const char *reg = get_register_name_32(f->cpuid_reg);
2758 assert(reg);
2759 warn_report("%s doesn't support requested feature: "
2760 "CPUID.%02XH:%s%s%s [bit %d]",
2761 accel_uses_host_cpuid() ? "host" : "TCG",
2762 f->cpuid_eax, reg,
2763 f->feat_names[i] ? "." : "",
2764 f->feat_names[i] ? f->feat_names[i] : "", i);
2765 }
2766 }
2767 }
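/*
 * Example of the resulting message (format taken from the warn_report()
 * call above), e.g. when HLE was requested but filtered out by KVM:
 *
 *     host doesn't support requested feature: CPUID.07H:EBX.hle [bit 4]
 */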
2768
2769 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2770 const char *name, void *opaque,
2771 Error **errp)
2772 {
2773 X86CPU *cpu = X86_CPU(obj);
2774 CPUX86State *env = &cpu->env;
2775 int64_t value;
2776
2777 value = (env->cpuid_version >> 8) & 0xf;
2778 if (value == 0xf) {
2779 value += (env->cpuid_version >> 20) & 0xff;
2780 }
2781 visit_type_int(v, name, &value, errp);
2782 }
2783
2784 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2785 const char *name, void *opaque,
2786 Error **errp)
2787 {
2788 X86CPU *cpu = X86_CPU(obj);
2789 CPUX86State *env = &cpu->env;
2790 const int64_t min = 0;
2791 const int64_t max = 0xff + 0xf;
2792 Error *local_err = NULL;
2793 int64_t value;
2794
2795 visit_type_int(v, name, &value, &local_err);
2796 if (local_err) {
2797 error_propagate(errp, local_err);
2798 return;
2799 }
2800 if (value < min || value > max) {
2801 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2802 name ? name : "null", value, min, max);
2803 return;
2804 }
2805
2806 env->cpuid_version &= ~0xff00f00;
2807 if (value > 0x0f) {
2808 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2809 } else {
2810 env->cpuid_version |= value << 8;
2811 }
2812 }
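/*
 * Worked example for the encoding above (standard CPUID leaf 1 EAX layout):
 * a requested family of 23 (0x17) exceeds 0x0f, so the base family field
 * (bits 11:8) is set to 0xf and the extended family field (bits 27:20) to
 * 23 - 15 = 8, i.e. cpuid_version |= 0x00800f00.  The getter reverses this
 * by adding the two fields back together.
 */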
2813
2814 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2815 const char *name, void *opaque,
2816 Error **errp)
2817 {
2818 X86CPU *cpu = X86_CPU(obj);
2819 CPUX86State *env = &cpu->env;
2820 int64_t value;
2821
2822 value = (env->cpuid_version >> 4) & 0xf;
2823 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2824 visit_type_int(v, name, &value, errp);
2825 }
2826
2827 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2828 const char *name, void *opaque,
2829 Error **errp)
2830 {
2831 X86CPU *cpu = X86_CPU(obj);
2832 CPUX86State *env = &cpu->env;
2833 const int64_t min = 0;
2834 const int64_t max = 0xff;
2835 Error *local_err = NULL;
2836 int64_t value;
2837
2838 visit_type_int(v, name, &value, &local_err);
2839 if (local_err) {
2840 error_propagate(errp, local_err);
2841 return;
2842 }
2843 if (value < min || value > max) {
2844 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2845 name ? name : "null", value, min, max);
2846 return;
2847 }
2848
2849 env->cpuid_version &= ~0xf00f0;
2850 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
2851 }
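/*
 * Worked example for the model encoding above: model 94 (0x5e, the
 * Skylake-Client definition) keeps its low nibble 0xe in bits 7:4 and its
 * high nibble 0x5 in the extended model field (bits 19:16), i.e.
 * cpuid_version |= 0x000500e0; the getter recombines the two nibbles.
 */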
2852
2853 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2854 const char *name, void *opaque,
2855 Error **errp)
2856 {
2857 X86CPU *cpu = X86_CPU(obj);
2858 CPUX86State *env = &cpu->env;
2859 int64_t value;
2860
2861 value = env->cpuid_version & 0xf;
2862 visit_type_int(v, name, &value, errp);
2863 }
2864
2865 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2866 const char *name, void *opaque,
2867 Error **errp)
2868 {
2869 X86CPU *cpu = X86_CPU(obj);
2870 CPUX86State *env = &cpu->env;
2871 const int64_t min = 0;
2872 const int64_t max = 0xf;
2873 Error *local_err = NULL;
2874 int64_t value;
2875
2876 visit_type_int(v, name, &value, &local_err);
2877 if (local_err) {
2878 error_propagate(errp, local_err);
2879 return;
2880 }
2881 if (value < min || value > max) {
2882 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2883 name ? name : "null", value, min, max);
2884 return;
2885 }
2886
2887 env->cpuid_version &= ~0xf;
2888 env->cpuid_version |= value & 0xf;
2889 }
2890
2891 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2892 {
2893 X86CPU *cpu = X86_CPU(obj);
2894 CPUX86State *env = &cpu->env;
2895 char *value;
2896
2897 value = g_malloc(CPUID_VENDOR_SZ + 1);
2898 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
2899 env->cpuid_vendor3);
2900 return value;
2901 }
2902
2903 static void x86_cpuid_set_vendor(Object *obj, const char *value,
2904 Error **errp)
2905 {
2906 X86CPU *cpu = X86_CPU(obj);
2907 CPUX86State *env = &cpu->env;
2908 int i;
2909
2910 if (strlen(value) != CPUID_VENDOR_SZ) {
2911 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
2912 return;
2913 }
2914
2915 env->cpuid_vendor1 = 0;
2916 env->cpuid_vendor2 = 0;
2917 env->cpuid_vendor3 = 0;
2918 for (i = 0; i < 4; i++) {
2919 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
2920 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
2921 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
2922 }
2923 }
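/*
 * Worked example for the packing above: the 12-character vendor string
 * "GenuineIntel" is split into three little-endian 32-bit words, matching
 * the architectural CPUID.0 register order EBX, EDX, ECX:
 *
 *     cpuid_vendor1 = 0x756e6547   ("Genu")
 *     cpuid_vendor2 = 0x49656e69   ("ineI")
 *     cpuid_vendor3 = 0x6c65746e   ("ntel")
 */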
2924
2925 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
2926 {
2927 X86CPU *cpu = X86_CPU(obj);
2928 CPUX86State *env = &cpu->env;
2929 char *value;
2930 int i;
2931
2932 value = g_malloc(48 + 1);
2933 for (i = 0; i < 48; i++) {
2934 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
2935 }
2936 value[48] = '\0';
2937 return value;
2938 }
2939
2940 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
2941 Error **errp)
2942 {
2943 X86CPU *cpu = X86_CPU(obj);
2944 CPUX86State *env = &cpu->env;
2945 int c, len, i;
2946
2947 if (model_id == NULL) {
2948 model_id = "";
2949 }
2950 len = strlen(model_id);
2951 memset(env->cpuid_model, 0, 48);
2952 for (i = 0; i < 48; i++) {
2953 if (i >= len) {
2954 c = '\0';
2955 } else {
2956 c = (uint8_t)model_id[i];
2957 }
2958 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
2959 }
2960 }
2961
2962 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
2963 void *opaque, Error **errp)
2964 {
2965 X86CPU *cpu = X86_CPU(obj);
2966 int64_t value;
2967
2968 value = cpu->env.tsc_khz * 1000;
2969 visit_type_int(v, name, &value, errp);
2970 }
2971
2972 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
2973 void *opaque, Error **errp)
2974 {
2975 X86CPU *cpu = X86_CPU(obj);
2976 const int64_t min = 0;
2977 const int64_t max = INT64_MAX;
2978 Error *local_err = NULL;
2979 int64_t value;
2980
2981 visit_type_int(v, name, &value, &local_err);
2982 if (local_err) {
2983 error_propagate(errp, local_err);
2984 return;
2985 }
2986 if (value < min || value > max) {
2987 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2988 name ? name : "null", value, min, max);
2989 return;
2990 }
2991
2992 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
2993 }
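/*
 * Note on units, derived from the conversion above: the "tsc-frequency"
 * property is expressed in Hz while the internal fields are kept in kHz,
 * so e.g. tsc-frequency=2000000000 stores tsc_khz = user_tsc_khz = 2000000.
 */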
2994
2995 /* Generic getter for "feature-words" and "filtered-features" properties */
2996 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
2997 const char *name, void *opaque,
2998 Error **errp)
2999 {
3000 uint32_t *array = (uint32_t *)opaque;
3001 FeatureWord w;
3002 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
3003 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
3004 X86CPUFeatureWordInfoList *list = NULL;
3005
3006 for (w = 0; w < FEATURE_WORDS; w++) {
3007 FeatureWordInfo *wi = &feature_word_info[w];
3008 X86CPUFeatureWordInfo *qwi = &word_infos[w];
3009 qwi->cpuid_input_eax = wi->cpuid_eax;
3010 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
3011 qwi->cpuid_input_ecx = wi->cpuid_ecx;
3012 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
3013 qwi->features = array[w];
3014
3015 /* List will be in reverse order, but order shouldn't matter */
3016 list_entries[w].next = list;
3017 list_entries[w].value = &word_infos[w];
3018 list = &list_entries[w];
3019 }
3020
3021 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
3022 }
3023
3024 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3025 void *opaque, Error **errp)
3026 {
3027 X86CPU *cpu = X86_CPU(obj);
3028 int64_t value = cpu->hyperv_spinlock_attempts;
3029
3030 visit_type_int(v, name, &value, errp);
3031 }
3032
3033 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3034 void *opaque, Error **errp)
3035 {
3036 const int64_t min = 0xFFF;
3037 const int64_t max = UINT_MAX;
3038 X86CPU *cpu = X86_CPU(obj);
3039 Error *err = NULL;
3040 int64_t value;
3041
3042 visit_type_int(v, name, &value, &err);
3043 if (err) {
3044 error_propagate(errp, err);
3045 return;
3046 }
3047
3048 if (value < min || value > max) {
3049 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
3050 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
3051 object_get_typename(obj), name ? name : "null",
3052 value, min, max);
3053 return;
3054 }
3055 cpu->hyperv_spinlock_attempts = value;
3056 }
3057
3058 static const PropertyInfo qdev_prop_spinlocks = {
3059 .name = "int",
3060 .get = x86_get_hv_spinlocks,
3061 .set = x86_set_hv_spinlocks,
3062 };
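/*
 * Usage sketch (hypothetical command line): the property backed by this
 * PropertyInfo is normally set through the CPU option string, e.g.
 * "-cpu host,hv-spinlocks=0x1fff"; values below the 0xFFF minimum enforced
 * above are rejected.
 */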
3063
3064 /* Convert all '_' in a feature string option name to '-', to make the feature
3065 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
3066 */
3067 static inline void feat2prop(char *s)
3068 {
3069 while ((s = strchr(s, '_'))) {
3070 *s = '-';
3071 }
3072 }
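/* Example: feat2prop() turns an option spelled "kvm_pv_eoi" into the
 * QOM-style property name "kvm-pv-eoi".
 */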
3073
3074 /* Return the feature property name for a feature flag bit */
3075 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
3076 {
3077 /* XSAVE components are automatically enabled by other features,
3078 * so return the original feature name instead
3079 */
3080 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
3081 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
3082
3083 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
3084 x86_ext_save_areas[comp].bits) {
3085 w = x86_ext_save_areas[comp].feature;
3086 bitnr = ctz32(x86_ext_save_areas[comp].bits);
3087 }
3088 }
3089
3090 assert(bitnr < 32);
3091 assert(w < FEATURE_WORDS);
3092 return feature_word_info[w].feat_names[bitnr];
3093 }
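/*
 * Example (assuming the architectural XSAVE state-component numbering, in
 * which component 2 is AVX/YMM state): a bit set in FEAT_XSAVE_COMP_LO
 * bit 2 is reported under the name of the feature that enables it, i.e.
 * "avx" from FEAT_1_ECX, rather than as an anonymous XSAVE component bit.
 */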
3094
3095 /* Compatibility hack to maintain the legacy +-feat semantics,
3096 * where +-feat overwrites any feature set by
3097 * feat=on|off even if the latter is parsed after +-feat
3098 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
3099 */
3100 static GList *plus_features, *minus_features;
3101
3102 static gint compare_string(gconstpointer a, gconstpointer b)
3103 {
3104 return g_strcmp0(a, b);
3105 }
3106
3107 /* Parse "+feature,-feature,feature=foo" CPU feature string
3108 */
3109 static void x86_cpu_parse_featurestr(const char *typename, char *features,
3110 Error **errp)
3111 {
3112 char *featurestr; /* Single "key=value" string being parsed */
3113 static bool cpu_globals_initialized;
3114 bool ambiguous = false;
3115
3116 if (cpu_globals_initialized) {
3117 return;
3118 }
3119 cpu_globals_initialized = true;
3120
3121 if (!features) {
3122 return;
3123 }
3124
3125 for (featurestr = strtok(features, ",");
3126 featurestr;
3127 featurestr = strtok(NULL, ",")) {
3128 const char *name;
3129 const char *val = NULL;
3130 char *eq = NULL;
3131 char num[32];
3132 GlobalProperty *prop;
3133
3134 /* Compatibility syntax: */
3135 if (featurestr[0] == '+') {
3136 plus_features = g_list_append(plus_features,
3137 g_strdup(featurestr + 1));
3138 continue;
3139 } else if (featurestr[0] == '-') {
3140 minus_features = g_list_append(minus_features,
3141 g_strdup(featurestr + 1));
3142 continue;
3143 }
3144
3145 eq = strchr(featurestr, '=');
3146 if (eq) {
3147 *eq++ = 0;
3148 val = eq;
3149 } else {
3150 val = "on";
3151 }
3152
3153 feat2prop(featurestr);
3154 name = featurestr;
3155
3156 if (g_list_find_custom(plus_features, name, compare_string)) {
3157 warn_report("Ambiguous CPU model string. "
3158 "Don't mix both \"+%s\" and \"%s=%s\"",
3159 name, name, val);
3160 ambiguous = true;
3161 }
3162 if (g_list_find_custom(minus_features, name, compare_string)) {
3163 warn_report("Ambiguous CPU model string. "
3164 "Don't mix both \"-%s\" and \"%s=%s\"",
3165 name, name, val);
3166 ambiguous = true;
3167 }
3168
3169 /* Special case: */
3170 if (!strcmp(name, "tsc-freq")) {
3171 int ret;
3172 uint64_t tsc_freq;
3173
3174 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
3175 if (ret < 0 || tsc_freq > INT64_MAX) {
3176 error_setg(errp, "bad numerical value %s", val);
3177 return;
3178 }
3179 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
3180 val = num;
3181 name = "tsc-frequency";
3182 }
3183
3184 prop = g_new0(typeof(*prop), 1);
3185 prop->driver = typename;
3186 prop->property = g_strdup(name);
3187 prop->value = g_strdup(val);
3188 prop->errp = &error_fatal;
3189 qdev_prop_register_global(prop);
3190 }
3191
3192 if (ambiguous) {
3193 warn_report("Compatibility of ambiguous CPU model "
3194 "strings won't be kept on future QEMU versions");
3195 }
3196 }
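/*
 * Parsing sketch (hypothetical option string): given
 * "-cpu Skylake-Client,+avx2,-rtm,tsc_freq=2.5G", the loop above records
 * "avx2" in plus_features and "rtm" in minus_features, and registers a
 * global property tsc-frequency=2500000000 for the CPU type
 * (qemu_strtosz_metric() expands the "G" suffix).  Mixing "+avx2" with
 * "avx2=off" on the same line triggers the ambiguity warning.
 */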
3197
3198 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3199 static int x86_cpu_filter_features(X86CPU *cpu);
3200
3201 /* Check for missing features that may prevent the CPU class from
3202 * running on the current machine and accelerator.
3203 */
3204 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3205 strList **missing_feats)
3206 {
3207 X86CPU *xc;
3208 FeatureWord w;
3209 Error *err = NULL;
3210 strList **next = missing_feats;
3211
3212 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3213 strList *new = g_new0(strList, 1);
3214 new->value = g_strdup("kvm");
3215 *missing_feats = new;
3216 return;
3217 }
3218
3219 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3220
3221 x86_cpu_expand_features(xc, &err);
3222 if (err) {
3223 /* Errors at x86_cpu_expand_features should never happen,
3224 * but in case they do, just report the model as not
3225 * runnable at all using the "type" property.
3226 */
3227 strList *new = g_new0(strList, 1);
3228 new->value = g_strdup("type");
3229 *next = new;
3230 next = &new->next;
3231 }
3232
3233 x86_cpu_filter_features(xc);
3234
3235 for (w = 0; w < FEATURE_WORDS; w++) {
3236 uint32_t filtered = xc->filtered_features[w];
3237 int i;
3238 for (i = 0; i < 32; i++) {
3239 if (filtered & (1UL << i)) {
3240 strList *new = g_new0(strList, 1);
3241 new->value = g_strdup(x86_cpu_feature_name(w, i));
3242 *next = new;
3243 next = &new->next;
3244 }
3245 }
3246 }
3247
3248 object_unref(OBJECT(xc));
3249 }
3250
3251 /* Print all cpuid feature names in featureset
3252 */
3253 static void listflags(FILE *f, fprintf_function print, const char **featureset)
3254 {
3255 int bit;
3256 bool first = true;
3257
3258 for (bit = 0; bit < 32; bit++) {
3259 if (featureset[bit]) {
3260 print(f, "%s%s", first ? "" : " ", featureset[bit]);
3261 first = false;
3262 }
3263 }
3264 }
3265
3266 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3267 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3268 {
3269 ObjectClass *class_a = (ObjectClass *)a;
3270 ObjectClass *class_b = (ObjectClass *)b;
3271 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3272 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3273 const char *name_a, *name_b;
3274
3275 if (cc_a->ordering != cc_b->ordering) {
3276 return cc_a->ordering - cc_b->ordering;
3277 } else {
3278 name_a = object_class_get_name(class_a);
3279 name_b = object_class_get_name(class_b);
3280 return strcmp(name_a, name_b);
3281 }
3282 }
3283
3284 static GSList *get_sorted_cpu_model_list(void)
3285 {
3286 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3287 list = g_slist_sort(list, x86_cpu_list_compare);
3288 return list;
3289 }
3290
3291 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3292 {
3293 ObjectClass *oc = data;
3294 X86CPUClass *cc = X86_CPU_CLASS(oc);
3295 CPUListState *s = user_data;
3296 char *name = x86_cpu_class_get_model_name(cc);
3297 const char *desc = cc->model_description;
3298 if (!desc && cc->cpu_def) {
3299 desc = cc->cpu_def->model_id;
3300 }
3301
3302 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
3303 name, desc);
3304 g_free(name);
3305 }
3306
3307 /* list available CPU models and flags */
3308 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3309 {
3310 int i;
3311 CPUListState s = {
3312 .file = f,
3313 .cpu_fprintf = cpu_fprintf,
3314 };
3315 GSList *list;
3316
3317 (*cpu_fprintf)(f, "Available CPUs:\n");
3318 list = get_sorted_cpu_model_list();
3319 g_slist_foreach(list, x86_cpu_list_entry, &s);
3320 g_slist_free(list);
3321
3322 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
3323 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3324 FeatureWordInfo *fw = &feature_word_info[i];
3325
3326 (*cpu_fprintf)(f, " ");
3327 listflags(f, cpu_fprintf, fw->feat_names);
3328 (*cpu_fprintf)(f, "\n");
3329 }
3330 }
3331
3332 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3333 {
3334 ObjectClass *oc = data;
3335 X86CPUClass *cc = X86_CPU_CLASS(oc);
3336 CpuDefinitionInfoList **cpu_list = user_data;
3337 CpuDefinitionInfoList *entry;
3338 CpuDefinitionInfo *info;
3339
3340 info = g_malloc0(sizeof(*info));
3341 info->name = x86_cpu_class_get_model_name(cc);
3342 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3343 info->has_unavailable_features = true;
3344 info->q_typename = g_strdup(object_class_get_name(oc));
3345 info->migration_safe = cc->migration_safe;
3346 info->has_migration_safe = true;
3347 info->q_static = cc->static_model;
3348
3349 entry = g_malloc0(sizeof(*entry));
3350 entry->value = info;
3351 entry->next = *cpu_list;
3352 *cpu_list = entry;
3353 }
3354
3355 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
3356 {
3357 CpuDefinitionInfoList *cpu_list = NULL;
3358 GSList *list = get_sorted_cpu_model_list();
3359 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3360 g_slist_free(list);
3361 return cpu_list;
3362 }
3363
3364 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3365 bool migratable_only)
3366 {
3367 FeatureWordInfo *wi = &feature_word_info[w];
3368 uint32_t r;
3369
3370 if (kvm_enabled()) {
3371 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
3372 wi->cpuid_ecx,
3373 wi->cpuid_reg);
3374 } else if (hvf_enabled()) {
3375 r = hvf_get_supported_cpuid(wi->cpuid_eax,
3376 wi->cpuid_ecx,
3377 wi->cpuid_reg);
3378 } else if (tcg_enabled()) {
3379 r = wi->tcg_features;
3380 } else {
3381 return ~0;
3382 }
3383 if (migratable_only) {
3384 r &= x86_cpu_get_migratable_flags(w);
3385 }
3386 return r;
3387 }
3388
3389 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3390 {
3391 FeatureWord w;
3392
3393 for (w = 0; w < FEATURE_WORDS; w++) {
3394 report_unavailable_features(w, cpu->filtered_features[w]);
3395 }
3396 }
3397
3398 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3399 {
3400 PropValue *pv;
3401 for (pv = props; pv->prop; pv++) {
3402 if (!pv->value) {
3403 continue;
3404 }
3405 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3406 &error_abort);
3407 }
3408 }
3409
3410 /* Load data from X86CPUDefinition into a X86CPU object
3411 */
3412 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
3413 {
3414 CPUX86State *env = &cpu->env;
3415 const char *vendor;
3416 char host_vendor[CPUID_VENDOR_SZ + 1];
3417 FeatureWord w;
3418
3419 /*NOTE: any property set by this function should be returned by
3420 * x86_cpu_static_props(), so static expansion of
3421 * query-cpu-model-expansion is always complete.
3422 */
3423
3424 /* CPU models only set _minimum_ values for level/xlevel: */
3425 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
3426 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
3427
3428 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
3429 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
3430 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
3431 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
3432 for (w = 0; w < FEATURE_WORDS; w++) {
3433 env->features[w] = def->features[w];
3434 }
3435
3436 /* legacy-cache defaults to 'off' if CPU model provides cache info */
3437 cpu->legacy_cache = !def->cache_info;
3438
3439 /* Special cases not set in the X86CPUDefinition structs: */
3440 /* TODO: in-kernel irqchip for hvf */
3441 if (kvm_enabled()) {
3442 if (!kvm_irqchip_in_kernel()) {
3443 x86_cpu_change_kvm_default("x2apic", "off");
3444 }
3445
3446 x86_cpu_apply_props(cpu, kvm_default_props);
3447 } else if (tcg_enabled()) {
3448 x86_cpu_apply_props(cpu, tcg_default_props);
3449 }
3450
3451 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
3452
3453 /* sysenter isn't supported in compatibility mode on AMD,
3454 * syscall isn't supported in compatibility mode on Intel.
3455 * Normally we advertise the actual CPU vendor, but you can
3456 * override this using the 'vendor' property if you want to use
3457 * KVM's sysenter/syscall emulation in compatibility mode and
3458 * when doing cross-vendor migration.
3459 */
3460 vendor = def->vendor;
3461 if (accel_uses_host_cpuid()) {
3462 uint32_t ebx = 0, ecx = 0, edx = 0;
3463 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
3464 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
3465 vendor = host_vendor;
3466 }
3467
3468 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
3469
3470 }
3471
3472 /* Return a QDict containing keys for all properties that can be included
3473 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3474 * must be included in the dictionary.
3475 */
3476 static QDict *x86_cpu_static_props(void)
3477 {
3478 FeatureWord w;
3479 int i;
3480 static const char *props[] = {
3481 "min-level",
3482 "min-xlevel",
3483 "family",
3484 "model",
3485 "stepping",
3486 "model-id",
3487 "vendor",
3488 "lmce",
3489 NULL,
3490 };
3491 static QDict *d;
3492
3493 if (d) {
3494 return d;
3495 }
3496
3497 d = qdict_new();
3498 for (i = 0; props[i]; i++) {
3499 qdict_put_null(d, props[i]);
3500 }
3501
3502 for (w = 0; w < FEATURE_WORDS; w++) {
3503 FeatureWordInfo *fi = &feature_word_info[w];
3504 int bit;
3505 for (bit = 0; bit < 32; bit++) {
3506 if (!fi->feat_names[bit]) {
3507 continue;
3508 }
3509 qdict_put_null(d, fi->feat_names[bit]);
3510 }
3511 }
3512
3513 return d;
3514 }
3515
3516 /* Add an entry to @props dict, with the value for property. */
3517 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3518 {
3519 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3520 &error_abort);
3521
3522 qdict_put_obj(props, prop, value);
3523 }
3524
3525 /* Convert CPU model data from X86CPU object to a property dictionary
3526 * that can recreate exactly the same CPU model.
3527 */
3528 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3529 {
3530 QDict *sprops = x86_cpu_static_props();
3531 const QDictEntry *e;
3532
3533 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3534 const char *prop = qdict_entry_key(e);
3535 x86_cpu_expand_prop(cpu, props, prop);
3536 }
3537 }
3538
3539 /* Convert CPU model data from X86CPU object to a property dictionary
3540 * that can recreate exactly the same CPU model, including every
3541 * writeable QOM property.
3542 */
3543 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3544 {
3545 ObjectPropertyIterator iter;
3546 ObjectProperty *prop;
3547
3548 object_property_iter_init(&iter, OBJECT(cpu));
3549 while ((prop = object_property_iter_next(&iter))) {
3550 /* skip read-only or write-only properties */
3551 if (!prop->get || !prop->set) {
3552 continue;
3553 }
3554
3555 /* "hotplugged" is the only property that is configurable
3556 * on the command-line but will be set differently on CPUs
3557 * created using "-cpu ... -smp ..." and by CPUs created
3558 * on the fly by x86_cpu_from_model() for querying. Skip it.
3559 */
3560 if (!strcmp(prop->name, "hotplugged")) {
3561 continue;
3562 }
3563 x86_cpu_expand_prop(cpu, props, prop->name);
3564 }
3565 }
3566
3567 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3568 {
3569 const QDictEntry *prop;
3570 Error *err = NULL;
3571
3572 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3573 object_property_set_qobject(obj, qdict_entry_value(prop),
3574 qdict_entry_key(prop), &err);
3575 if (err) {
3576 break;
3577 }
3578 }
3579
3580 error_propagate(errp, err);
3581 }
3582
3583 /* Create X86CPU object according to model+props specification */
3584 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3585 {
3586 X86CPU *xc = NULL;
3587 X86CPUClass *xcc;
3588 Error *err = NULL;
3589
3590 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3591 if (xcc == NULL) {
3592 error_setg(&err, "CPU model '%s' not found", model);
3593 goto out;
3594 }
3595
3596 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3597 if (props) {
3598 object_apply_props(OBJECT(xc), props, &err);
3599 if (err) {
3600 goto out;
3601 }
3602 }
3603
3604 x86_cpu_expand_features(xc, &err);
3605 if (err) {
3606 goto out;
3607 }
3608
3609 out:
3610 if (err) {
3611 error_propagate(errp, err);
3612 object_unref(OBJECT(xc));
3613 xc = NULL;
3614 }
3615 return xc;
3616 }
3617
3618 CpuModelExpansionInfo *
3619 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3620 CpuModelInfo *model,
3621 Error **errp)
3622 {
3623 X86CPU *xc = NULL;
3624 Error *err = NULL;
3625 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3626 QDict *props = NULL;
3627 const char *base_name;
3628
3629 xc = x86_cpu_from_model(model->name,
3630 model->has_props ?
3631 qobject_to(QDict, model->props) :
3632 NULL, &err);
3633 if (err) {
3634 goto out;
3635 }
3636
3637 props = qdict_new();
3638
3639 switch (type) {
3640 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3641 /* Static expansion will be based on "base" only */
3642 base_name = "base";
3643 x86_cpu_to_dict(xc, props);
3644 break;
3645 case CPU_MODEL_EXPANSION_TYPE_FULL:
3646 /* As we don't return every single property, full expansion needs
3647 * to keep the original model name+props, and add extra
3648 * properties on top of that.
3649 */
3650 base_name = model->name;
3651 x86_cpu_to_dict_full(xc, props);
3652 break;
3653 default:
3654 error_setg(&err, "Unsupported expansion type");
3655 goto out;
3656 }
3657
3658 if (!props) {
3659 props = qdict_new();
3660 }
3661 x86_cpu_to_dict(xc, props);
3662
3663 ret->model = g_new0(CpuModelInfo, 1);
3664 ret->model->name = g_strdup(base_name);
3665 ret->model->props = QOBJECT(props);
3666 ret->model->has_props = true;
3667
3668 out:
3669 object_unref(OBJECT(xc));
3670 if (err) {
3671 error_propagate(errp, err);
3672 qapi_free_CpuModelExpansionInfo(ret);
3673 ret = NULL;
3674 }
3675 return ret;
3676 }
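/*
 * Illustrative note (not part of the original source): a sketch of how this
 * handler might be driven over QMP. The model name "Skylake-Client" and the
 * extra "pcid" property are assumptions chosen only for the example.
 *
 *   -> { "execute": "query-cpu-model-expansion",
 *        "arguments": { "type": "static",
 *                       "model": { "name": "Skylake-Client",
 *                                  "props": { "pcid": false } } } }
 *   <- { "return": { "model": { "name": "base", "props": { ... } } } }
 *
 * Static expansion answers with the "base" model plus the property dictionary
 * built by x86_cpu_to_dict(); full expansion keeps the original model name
 * and adds every writable QOM property via x86_cpu_to_dict_full().
 */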
3677
3678 static gchar *x86_gdb_arch_name(CPUState *cs)
3679 {
3680 #ifdef TARGET_X86_64
3681 return g_strdup("i386:x86-64");
3682 #else
3683 return g_strdup("i386");
3684 #endif
3685 }
3686
3687 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3688 {
3689 X86CPUDefinition *cpudef = data;
3690 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3691
3692 xcc->cpu_def = cpudef;
3693 xcc->migration_safe = true;
3694 }
3695
3696 static void x86_register_cpudef_type(X86CPUDefinition *def)
3697 {
3698 char *typename = x86_cpu_type_name(def->name);
3699 TypeInfo ti = {
3700 .name = typename,
3701 .parent = TYPE_X86_CPU,
3702 .class_init = x86_cpu_cpudef_class_init,
3703 .class_data = def,
3704 };
3705
3706 /* AMD aliases are handled at runtime based on CPUID vendor, so
3707 * they shouldn't be set on the CPU model table.
3708 */
3709 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3710 /* catch mistakes instead of silently truncating model_id when too long */
3711 assert(def->model_id && strlen(def->model_id) <= 48);
3712
3713
3714 type_register(&ti);
3715 g_free(typename);
3716 }
3717
3718 #if !defined(CONFIG_USER_ONLY)
3719
3720 void cpu_clear_apic_feature(CPUX86State *env)
3721 {
3722 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3723 }
3724
3725 #endif /* !CONFIG_USER_ONLY */
3726
3727 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3728 uint32_t *eax, uint32_t *ebx,
3729 uint32_t *ecx, uint32_t *edx)
3730 {
3731 X86CPU *cpu = x86_env_get_cpu(env);
3732 CPUState *cs = CPU(cpu);
3733 uint32_t pkg_offset;
3734 uint32_t limit;
3735 uint32_t signature[3];
3736
3737 /* Calculate & apply limits for different index ranges */
3738 if (index >= 0xC0000000) {
3739 limit = env->cpuid_xlevel2;
3740 } else if (index >= 0x80000000) {
3741 limit = env->cpuid_xlevel;
3742 } else if (index >= 0x40000000) {
3743 limit = 0x40000001;
3744 } else {
3745 limit = env->cpuid_level;
3746 }
3747
3748 if (index > limit) {
3749 /* Intel documentation states that invalid EAX input will
3750 * return the same information as EAX=cpuid_level
3751 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3752 */
3753 index = env->cpuid_level;
3754 }
3755
3756 switch (index) {
3757 case 0:
3758 *eax = env->cpuid_level;
3759 *ebx = env->cpuid_vendor1;
3760 *edx = env->cpuid_vendor2;
3761 *ecx = env->cpuid_vendor3;
3762 break;
3763 case 1:
3764 *eax = env->cpuid_version;
3765 *ebx = (cpu->apic_id << 24) |
3766 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3767 *ecx = env->features[FEAT_1_ECX];
3768 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3769 *ecx |= CPUID_EXT_OSXSAVE;
3770 }
3771 *edx = env->features[FEAT_1_EDX];
3772 if (cs->nr_cores * cs->nr_threads > 1) {
3773 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3774 *edx |= CPUID_HT;
3775 }
3776 break;
3777 case 2:
3778 /* cache info: needed for Pentium Pro compatibility */
3779 if (cpu->cache_info_passthrough) {
3780 host_cpuid(index, 0, eax, ebx, ecx, edx);
3781 break;
3782 }
3783 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3784 *ebx = 0;
3785 if (!cpu->enable_l3_cache) {
3786 *ecx = 0;
3787 } else {
3788 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
3789 }
3790 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
3791 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
3792 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
3793 break;
3794 case 4:
3795 /* cache info: needed for Core compatibility */
3796 if (cpu->cache_info_passthrough) {
3797 host_cpuid(index, count, eax, ebx, ecx, edx);
3798 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3799 *eax &= ~0xFC000000;
3800 if ((*eax & 31) && cs->nr_cores > 1) {
3801 *eax |= (cs->nr_cores - 1) << 26;
3802 }
3803 } else {
3804 *eax = 0;
3805 switch (count) {
3806 case 0: /* L1 dcache info */
3807 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
3808 1, cs->nr_cores,
3809 eax, ebx, ecx, edx);
3810 break;
3811 case 1: /* L1 icache info */
3812 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
3813 1, cs->nr_cores,
3814 eax, ebx, ecx, edx);
3815 break;
3816 case 2: /* L2 cache info */
3817 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
3818 cs->nr_threads, cs->nr_cores,
3819 eax, ebx, ecx, edx);
3820 break;
3821 case 3: /* L3 cache info */
3822 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3823 if (cpu->enable_l3_cache) {
3824 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
3825 (1 << pkg_offset), cs->nr_cores,
3826 eax, ebx, ecx, edx);
3827 break;
3828 }
3829 /* fall through */
3830 default: /* end of info */
3831 *eax = *ebx = *ecx = *edx = 0;
3832 break;
3833 }
3834 }
3835 break;
3836 case 5:
3837 /* mwait info: needed for Core compatibility */
3838 *eax = 0; /* Smallest monitor-line size in bytes */
3839 *ebx = 0; /* Largest monitor-line size in bytes */
3840 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
3841 *edx = 0;
3842 break;
3843 case 6:
3844 /* Thermal and Power Leaf */
3845 *eax = env->features[FEAT_6_EAX];
3846 *ebx = 0;
3847 *ecx = 0;
3848 *edx = 0;
3849 break;
3850 case 7:
3851 /* Structured Extended Feature Flags Enumeration Leaf */
3852 if (count == 0) {
3853 *eax = 0; /* Maximum ECX value for sub-leaves */
3854 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3855 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
3856 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3857 *ecx |= CPUID_7_0_ECX_OSPKE;
3858 }
3859 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3860 } else {
3861 *eax = 0;
3862 *ebx = 0;
3863 *ecx = 0;
3864 *edx = 0;
3865 }
3866 break;
3867 case 9:
3868 /* Direct Cache Access Information Leaf */
3869 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3870 *ebx = 0;
3871 *ecx = 0;
3872 *edx = 0;
3873 break;
3874 case 0xA:
3875 /* Architectural Performance Monitoring Leaf */
3876 if (kvm_enabled() && cpu->enable_pmu) {
3877 KVMState *s = cs->kvm_state;
3878
3879 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
3880 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
3881 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
3882 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
3883 } else if (hvf_enabled() && cpu->enable_pmu) {
3884 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
3885 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
3886 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
3887 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
3888 } else {
3889 *eax = 0;
3890 *ebx = 0;
3891 *ecx = 0;
3892 *edx = 0;
3893 }
3894 break;
3895 case 0xB:
3896 /* Extended Topology Enumeration Leaf */
3897 if (!cpu->enable_cpuid_0xb) {
3898 *eax = *ebx = *ecx = *edx = 0;
3899 break;
3900 }
3901
3902 *ecx = count & 0xff;
3903 *edx = cpu->apic_id;
3904
3905 switch (count) {
3906 case 0:
3907 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
3908 *ebx = cs->nr_threads;
3909 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
3910 break;
3911 case 1:
3912 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3913 *ebx = cs->nr_cores * cs->nr_threads;
3914 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
3915 break;
3916 default:
3917 *eax = 0;
3918 *ebx = 0;
3919 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
3920 }
3921
3922 assert(!(*eax & ~0x1f));
3923 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
3924 break;
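        /*
         * Illustrative note (not part of the original source): a worked
         * example of the 0xB encoding above, assuming a guest started with
         * -smp cores=4,threads=2. apicid_core_offset() is then 1 and
         * apicid_pkg_offset() is 3, so:
         *
         *   sub-leaf 0 (SMT):  EAX[4:0] = 1, EBX = 2 (threads per core)
         *   sub-leaf 1 (CORE): EAX[4:0] = 3, EBX = 8 (logical CPUs per package)
         *
         * EDX always reports the full APIC ID of the calling vCPU.
         */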
3925 case 0xD: {
3926 /* Processor Extended State */
3927 *eax = 0;
3928 *ebx = 0;
3929 *ecx = 0;
3930 *edx = 0;
3931 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3932 break;
3933 }
3934
3935 if (count == 0) {
3936 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
3937 *eax = env->features[FEAT_XSAVE_COMP_LO];
3938 *edx = env->features[FEAT_XSAVE_COMP_HI];
3939 *ebx = *ecx;
3940 } else if (count == 1) {
3941 *eax = env->features[FEAT_XSAVE];
3942 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
3943 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
3944 const ExtSaveArea *esa = &x86_ext_save_areas[count];
3945 *eax = esa->size;
3946 *ebx = esa->offset;
3947 }
3948 }
3949 break;
3950 }
3951 case 0x14: {
3952 /* Intel Processor Trace Enumeration */
3953 *eax = 0;
3954 *ebx = 0;
3955 *ecx = 0;
3956 *edx = 0;
3957 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
3958 !kvm_enabled()) {
3959 break;
3960 }
3961
3962 if (count == 0) {
3963 *eax = INTEL_PT_MAX_SUBLEAF;
3964 *ebx = INTEL_PT_MINIMAL_EBX;
3965 *ecx = INTEL_PT_MINIMAL_ECX;
3966 } else if (count == 1) {
3967 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
3968 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
3969 }
3970 break;
3971 }
3972 case 0x40000000:
3973 /*
3974 * CPUID code in kvm_arch_init_vcpu() ignores anything
3975 * set here, but we restrict this leaf to TCG nonetheless.
3976 */
3977 if (tcg_enabled() && cpu->expose_tcg) {
3978 memcpy(signature, "TCGTCGTCGTCG", 12);
3979 *eax = 0x40000001;
3980 *ebx = signature[0];
3981 *ecx = signature[1];
3982 *edx = signature[2];
3983 } else {
3984 *eax = 0;
3985 *ebx = 0;
3986 *ecx = 0;
3987 *edx = 0;
3988 }
3989 break;
3990 case 0x40000001:
3991 *eax = 0;
3992 *ebx = 0;
3993 *ecx = 0;
3994 *edx = 0;
3995 break;
3996 case 0x80000000:
3997 *eax = env->cpuid_xlevel;
3998 *ebx = env->cpuid_vendor1;
3999 *edx = env->cpuid_vendor2;
4000 *ecx = env->cpuid_vendor3;
4001 break;
4002 case 0x80000001:
4003 *eax = env->cpuid_version;
4004 *ebx = 0;
4005 *ecx = env->features[FEAT_8000_0001_ECX];
4006 *edx = env->features[FEAT_8000_0001_EDX];
4007
4008 /* The Linux kernel checks for the CMPLegacy bit and
4009 * discards multiple thread information if it is set.
4010 * So don't set it here for Intel to make Linux guests happy.
4011 */
4012 if (cs->nr_cores * cs->nr_threads > 1) {
4013 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
4014 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
4015 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
4016 *ecx |= 1 << 1; /* CmpLegacy bit */
4017 }
4018 }
4019 break;
4020 case 0x80000002:
4021 case 0x80000003:
4022 case 0x80000004:
4023 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
4024 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
4025 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
4026 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
4027 break;
4028 case 0x80000005:
4029 /* cache info (L1 cache) */
4030 if (cpu->cache_info_passthrough) {
4031 host_cpuid(index, 0, eax, ebx, ecx, edx);
4032 break;
4033 }
4034 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
4035 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
4036 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
4037 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
4038 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
4039 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
4040 break;
4041 case 0x80000006:
4042 /* cache info (L2 cache) */
4043 if (cpu->cache_info_passthrough) {
4044 host_cpuid(index, 0, eax, ebx, ecx, edx);
4045 break;
4046 }
4047 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
4048 (L2_DTLB_2M_ENTRIES << 16) | \
4049 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
4050 (L2_ITLB_2M_ENTRIES);
4051 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
4052 (L2_DTLB_4K_ENTRIES << 16) | \
4053 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
4054 (L2_ITLB_4K_ENTRIES);
4055 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
4056 cpu->enable_l3_cache ?
4057 env->cache_info_amd.l3_cache : NULL,
4058 ecx, edx);
4059 break;
4060 case 0x80000007:
4061 *eax = 0;
4062 *ebx = 0;
4063 *ecx = 0;
4064 *edx = env->features[FEAT_8000_0007_EDX];
4065 break;
4066 case 0x80000008:
4067 /* virtual & phys address size in low 2 bytes. */
4068 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4069 /* 64 bit processor */
4070 *eax = cpu->phys_bits; /* configurable physical bits */
4071 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
4072 *eax |= 0x00003900; /* 57 bits virtual */
4073 } else {
4074 *eax |= 0x00003000; /* 48 bits virtual */
4075 }
4076 } else {
4077 *eax = cpu->phys_bits;
4078 }
4079 *ebx = env->features[FEAT_8000_0008_EBX];
4080 *ecx = 0;
4081 *edx = 0;
4082 if (cs->nr_cores * cs->nr_threads > 1) {
4083 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
4084 }
4085 break;
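        /*
         * Illustrative note (not part of the original source): with the
         * default phys-bits of 40 and LA57 absent, the EAX value built above
         * works out to 0x00003028 (0x28 = 40 physical bits, 0x30 = 48
         * virtual bits).
         */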
4086 case 0x8000000A:
4087 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4088 *eax = 0x00000001; /* SVM Revision */
4089 *ebx = 0x00000010; /* nr of ASIDs */
4090 *ecx = 0;
4091 *edx = env->features[FEAT_SVM]; /* optional features */
4092 } else {
4093 *eax = 0;
4094 *ebx = 0;
4095 *ecx = 0;
4096 *edx = 0;
4097 }
4098 break;
4099 case 0x8000001D:
4100 *eax = 0;
4101 switch (count) {
4102 case 0: /* L1 dcache info */
4103 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
4104 eax, ebx, ecx, edx);
4105 break;
4106 case 1: /* L1 icache info */
4107 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
4108 eax, ebx, ecx, edx);
4109 break;
4110 case 2: /* L2 cache info */
4111 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
4112 eax, ebx, ecx, edx);
4113 break;
4114 case 3: /* L3 cache info */
4115 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
4116 eax, ebx, ecx, edx);
4117 break;
4118 default: /* end of info */
4119 *eax = *ebx = *ecx = *edx = 0;
4120 break;
4121 }
4122 break;
4123 case 0xC0000000:
4124 *eax = env->cpuid_xlevel2;
4125 *ebx = 0;
4126 *ecx = 0;
4127 *edx = 0;
4128 break;
4129 case 0xC0000001:
4130 /* Support for VIA CPU's CPUID instruction */
4131 *eax = env->cpuid_version;
4132 *ebx = 0;
4133 *ecx = 0;
4134 *edx = env->features[FEAT_C000_0001_EDX];
4135 break;
4136 case 0xC0000002:
4137 case 0xC0000003:
4138 case 0xC0000004:
4139 /* Reserved for the future, and now filled with zero */
4140 *eax = 0;
4141 *ebx = 0;
4142 *ecx = 0;
4143 *edx = 0;
4144 break;
4145 case 0x8000001F:
4146 *eax = sev_enabled() ? 0x2 : 0;
4147 *ebx = sev_get_cbit_position();
4148 *ebx |= sev_get_reduced_phys_bits() << 6;
4149 *ecx = 0;
4150 *edx = 0;
4151 break;
4152 default:
4153 /* reserved values: zero */
4154 *eax = 0;
4155 *ebx = 0;
4156 *ecx = 0;
4157 *edx = 0;
4158 break;
4159 }
4160 }
4161
4162 /* CPUClass::reset() */
4163 static void x86_cpu_reset(CPUState *s)
4164 {
4165 X86CPU *cpu = X86_CPU(s);
4166 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
4167 CPUX86State *env = &cpu->env;
4168 target_ulong cr4;
4169 uint64_t xcr0;
4170 int i;
4171
4172 xcc->parent_reset(s);
4173
4174 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
4175
4176 env->old_exception = -1;
4177
4178 /* init to reset state */
4179
4180 env->hflags2 |= HF2_GIF_MASK;
4181
4182 cpu_x86_update_cr0(env, 0x60000010);
4183 env->a20_mask = ~0x0;
4184 env->smbase = 0x30000;
4185 env->msr_smi_count = 0;
4186
4187 env->idt.limit = 0xffff;
4188 env->gdt.limit = 0xffff;
4189 env->ldt.limit = 0xffff;
4190 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
4191 env->tr.limit = 0xffff;
4192 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
4193
4194 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
4195 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
4196 DESC_R_MASK | DESC_A_MASK);
4197 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
4198 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4199 DESC_A_MASK);
4200 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
4201 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4202 DESC_A_MASK);
4203 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
4204 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4205 DESC_A_MASK);
4206 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
4207 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4208 DESC_A_MASK);
4209 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
4210 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4211 DESC_A_MASK);
4212
4213 env->eip = 0xfff0;
4214 env->regs[R_EDX] = env->cpuid_version;
4215
4216 env->eflags = 0x2;
4217
4218 /* FPU init */
4219 for (i = 0; i < 8; i++) {
4220 env->fptags[i] = 1;
4221 }
4222 cpu_set_fpuc(env, 0x37f);
4223
4224 env->mxcsr = 0x1f80;
4225 /* All units are in INIT state. */
4226 env->xstate_bv = 0;
4227
4228 env->pat = 0x0007040600070406ULL;
4229 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
4230
4231 memset(env->dr, 0, sizeof(env->dr));
4232 env->dr[6] = DR6_FIXED_1;
4233 env->dr[7] = DR7_FIXED_1;
4234 cpu_breakpoint_remove_all(s, BP_CPU);
4235 cpu_watchpoint_remove_all(s, BP_CPU);
4236
4237 cr4 = 0;
4238 xcr0 = XSTATE_FP_MASK;
4239
4240 #ifdef CONFIG_USER_ONLY
4241 /* Enable all the features for user-mode. */
4242 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4243 xcr0 |= XSTATE_SSE_MASK;
4244 }
4245 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4246 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4247 if (env->features[esa->feature] & esa->bits) {
4248 xcr0 |= 1ull << i;
4249 }
4250 }
4251
4252 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
4253 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
4254 }
4255 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
4256 cr4 |= CR4_FSGSBASE_MASK;
4257 }
4258 #endif
4259
4260 env->xcr0 = xcr0;
4261 cpu_x86_update_cr4(env, cr4);
4262
4263 /*
4264 * SDM 11.11.5 requires:
4265 * - IA32_MTRR_DEF_TYPE MSR.E = 0
4266 * - IA32_MTRR_PHYSMASKn.V = 0
4267 * All other bits are undefined. For simplification, zero it all.
4268 */
4269 env->mtrr_deftype = 0;
4270 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
4271 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
4272
4273 env->interrupt_injected = -1;
4274 env->exception_injected = -1;
4275 env->nmi_injected = false;
4276 #if !defined(CONFIG_USER_ONLY)
4277 /* We hard-wire the BSP to the first CPU. */
4278 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
4279
4280 s->halted = !cpu_is_bsp(cpu);
4281
4282 if (kvm_enabled()) {
4283 kvm_arch_reset_vcpu(cpu);
4284 }
4285 else if (hvf_enabled()) {
4286 hvf_reset_vcpu(s);
4287 }
4288 #endif
4289 }
4290
4291 #ifndef CONFIG_USER_ONLY
4292 bool cpu_is_bsp(X86CPU *cpu)
4293 {
4294 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4295 }
4296
4297 /* TODO: remove me, when reset over QOM tree is implemented */
4298 static void x86_cpu_machine_reset_cb(void *opaque)
4299 {
4300 X86CPU *cpu = opaque;
4301 cpu_reset(CPU(cpu));
4302 }
4303 #endif
4304
4305 static void mce_init(X86CPU *cpu)
4306 {
4307 CPUX86State *cenv = &cpu->env;
4308 unsigned int bank;
4309
4310 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4311 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4312 (CPUID_MCE | CPUID_MCA)) {
4313 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4314 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4315 cenv->mcg_ctl = ~(uint64_t)0;
4316 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4317 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4318 }
4319 }
4320 }
4321
4322 #ifndef CONFIG_USER_ONLY
4323 APICCommonClass *apic_get_class(void)
4324 {
4325 const char *apic_type = "apic";
4326
4327 /* TODO: in-kernel irqchip for hvf */
4328 if (kvm_apic_in_kernel()) {
4329 apic_type = "kvm-apic";
4330 } else if (xen_enabled()) {
4331 apic_type = "xen-apic";
4332 }
4333
4334 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4335 }
4336
4337 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
4338 {
4339 APICCommonState *apic;
4340 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
4341
4342 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
4343
4344 object_property_add_child(OBJECT(cpu), "lapic",
4345 OBJECT(cpu->apic_state), &error_abort);
4346 object_unref(OBJECT(cpu->apic_state));
4347
4348 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
4349 /* TODO: convert to link<> */
4350 apic = APIC_COMMON(cpu->apic_state);
4351 apic->cpu = cpu;
4352 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
4353 }
4354
4355 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4356 {
4357 APICCommonState *apic;
4358 static bool apic_mmio_map_once;
4359
4360 if (cpu->apic_state == NULL) {
4361 return;
4362 }
4363 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
4364 errp);
4365
4366 /* Map APIC MMIO area */
4367 apic = APIC_COMMON(cpu->apic_state);
4368 if (!apic_mmio_map_once) {
4369 memory_region_add_subregion_overlap(get_system_memory(),
4370 apic->apicbase &
4371 MSR_IA32_APICBASE_BASE,
4372 &apic->io_memory,
4373 0x1000);
4374 apic_mmio_map_once = true;
4375 }
4376 }
4377
4378 static void x86_cpu_machine_done(Notifier *n, void *unused)
4379 {
4380 X86CPU *cpu = container_of(n, X86CPU, machine_done);
4381 MemoryRegion *smram =
4382 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4383
4384 if (smram) {
4385 cpu->smram = g_new(MemoryRegion, 1);
4386 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4387 smram, 0, 1ull << 32);
4388 memory_region_set_enabled(cpu->smram, true);
4389 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
4390 }
4391 }
4392 #else
4393 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4394 {
4395 }
4396 #endif
4397
4398 /* Note: Only safe for use on x86(-64) hosts */
4399 static uint32_t x86_host_phys_bits(void)
4400 {
4401 uint32_t eax;
4402 uint32_t host_phys_bits;
4403
4404 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4405 if (eax >= 0x80000008) {
4406 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4407 /* Note: According to AMD doc 25481 rev 2.34 they have a field
4408 * at 23:16 that can specify the maximum physical address bits for
4409 * the guest, overriding this value; but I've not seen
4410 * anything with that set.
4411 */
4412 host_phys_bits = eax & 0xff;
4413 } else {
4414 /* It's an odd 64 bit machine that doesn't have the leaf for
4415 * physical address bits; fall back to 36, which is what most older
4416 * Intel CPUs report.
4417 */
4418 host_phys_bits = 36;
4419 }
4420
4421 return host_phys_bits;
4422 }
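/*
 * Illustrative note (not part of the original source): on a host whose
 * CPUID.80000008H:EAX reads, say, 0x00003028, the function above returns
 * 0x28, i.e. 40 physical address bits.
 */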
4423
4424 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4425 {
4426 if (*min < value) {
4427 *min = value;
4428 }
4429 }
4430
4431 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4432 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4433 {
4434 CPUX86State *env = &cpu->env;
4435 FeatureWordInfo *fi = &feature_word_info[w];
4436 uint32_t eax = fi->cpuid_eax;
4437 uint32_t region = eax & 0xF0000000;
4438
4439 if (!env->features[w]) {
4440 return;
4441 }
4442
4443 switch (region) {
4444 case 0x00000000:
4445 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4446 break;
4447 case 0x80000000:
4448 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4449 break;
4450 case 0xC0000000:
4451 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4452 break;
4453 }
4454 }
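/*
 * Illustrative note (not part of the original source): enabling any bit in
 * FEAT_7_0_ECX (cpuid_eax == 7) raises cpuid_min_level to at least 7, while
 * a bit in FEAT_8000_0008_EBX raises cpuid_min_xlevel to at least
 * 0x80000008, so the corresponding leaf becomes visible to the guest.
 */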
4455
4456 /* Calculate XSAVE components based on the configured CPU feature flags */
4457 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4458 {
4459 CPUX86State *env = &cpu->env;
4460 int i;
4461 uint64_t mask;
4462
4463 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4464 return;
4465 }
4466
4467 mask = 0;
4468 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4469 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4470 if (env->features[esa->feature] & esa->bits) {
4471 mask |= (1ULL << i);
4472 }
4473 }
4474
4475 env->features[FEAT_XSAVE_COMP_LO] = mask;
4476 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4477 }
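/*
 * Illustrative note (not part of the original source): if only the x87, SSE
 * and AVX state components end up enabled, the mask computed above is 0x7,
 * so FEAT_XSAVE_COMP_LO = 7 and FEAT_XSAVE_COMP_HI = 0, and CPUID[0xD]
 * sub-leaf 0 reports exactly those components to the guest.
 */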
4478
4479 /***** Steps involved in loading and filtering CPUID data
4480 *
4481 * When initializing and realizing a CPU object, the steps
4482 * involved in setting up CPUID data are:
4483 *
4484 * 1) Loading CPU model definition (X86CPUDefinition). This is
4485 * implemented by x86_cpu_load_def() and should be completely
4486 * transparent, as it is done automatically by instance_init.
4487 * No code should need to look at X86CPUDefinition structs
4488 * outside instance_init.
4489 *
4490 * 2) CPU expansion. This is done by realize before CPUID
4491 * filtering, and will make sure host/accelerator data is
4492 * loaded for CPU models that depend on host capabilities
4493 * (e.g. "host"). Done by x86_cpu_expand_features().
4494 *
4495 * 3) CPUID filtering. This initializes extra data related to
4496 * CPUID, and checks if the host supports all capabilities
4497 * required by the CPU. Runnability of a CPU model is
4498 * determined at this step. Done by x86_cpu_filter_features().
4499 *
4500 * Some operations don't require all steps to be performed.
4501 * More precisely:
4502 *
4503 * - CPU instance creation (instance_init) will run only CPU
4504 * model loading. CPU expansion can't run at instance_init-time
4505 * because host/accelerator data may be not available yet.
4506 * - CPU realization will perform both CPU model expansion and CPUID
4507 * filtering, and return an error in case one of them fails.
4508 * - query-cpu-definitions needs to run all 3 steps. It needs
4509 * to run CPUID filtering, as the 'unavailable-features'
4510 * field is set based on the filtering results.
4511 * - The query-cpu-model-expansion QMP command only needs to run
4512 * CPU model loading and CPU expansion. It should not filter
4513 * any CPUID data based on host capabilities.
4514 */
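/*
 * Illustrative sketch (not part of the original source) of how the steps
 * above map onto the realize path further down in this file:
 *
 *     x86_cpu_expand_features(cpu, &err);     // step 2
 *     if (x86_cpu_filter_features(cpu) &&     // step 3
 *         cpu->enforce_cpuid) {
 *         ...refuse to start the CPU...
 *     }
 *
 * query-cpu-model-expansion stops after step 2 and never filters.
 */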
4515
4516 /* Expand CPU configuration data, based on configured features
4517 * and host/accelerator capabilities when appropriate.
4518 */
4519 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4520 {
4521 CPUX86State *env = &cpu->env;
4522 FeatureWord w;
4523 GList *l;
4524 Error *local_err = NULL;
4525
4526 /*TODO: Now cpu->max_features doesn't overwrite features
4527 * set using QOM properties, and we can convert
4528 * plus_features & minus_features to global properties
4529 * inside x86_cpu_parse_featurestr() too.
4530 */
4531 if (cpu->max_features) {
4532 for (w = 0; w < FEATURE_WORDS; w++) {
4533 /* Override only features that weren't set explicitly
4534 * by the user.
4535 */
4536 env->features[w] |=
4537 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4538 ~env->user_features[w] & \
4539 ~feature_word_info[w].no_autoenable_flags;
4540 }
4541 }
4542
4543 for (l = plus_features; l; l = l->next) {
4544 const char *prop = l->data;
4545 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4546 if (local_err) {
4547 goto out;
4548 }
4549 }
4550
4551 for (l = minus_features; l; l = l->next) {
4552 const char *prop = l->data;
4553 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4554 if (local_err) {
4555 goto out;
4556 }
4557 }
4558
4559 if (!kvm_enabled() || !cpu->expose_kvm) {
4560 env->features[FEAT_KVM] = 0;
4561 }
4562
4563 x86_cpu_enable_xsave_components(cpu);
4564
4565 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
4566 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4567 if (cpu->full_cpuid_auto_level) {
4568 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4569 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4570 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4571 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4572 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4573 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4574 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4575 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4576 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4577 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4578 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4579 /* SVM requires CPUID[0x8000000A] */
4580 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4581 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4582 }
4583
4584 /* SEV requires CPUID[0x8000001F] */
4585 if (sev_enabled()) {
4586 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4587 }
4588 }
4589
4590 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4591 if (env->cpuid_level == UINT32_MAX) {
4592 env->cpuid_level = env->cpuid_min_level;
4593 }
4594 if (env->cpuid_xlevel == UINT32_MAX) {
4595 env->cpuid_xlevel = env->cpuid_min_xlevel;
4596 }
4597 if (env->cpuid_xlevel2 == UINT32_MAX) {
4598 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4599 }
4600
4601 out:
4602 if (local_err != NULL) {
4603 error_propagate(errp, local_err);
4604 }
4605 }
4606
4607 /*
4608 * Finishes initialization of CPUID data, filters CPU feature
4609 * words based on host availability of each feature.
4610 *
4611 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4612 */
4613 static int x86_cpu_filter_features(X86CPU *cpu)
4614 {
4615 CPUX86State *env = &cpu->env;
4616 FeatureWord w;
4617 int rv = 0;
4618
4619 for (w = 0; w < FEATURE_WORDS; w++) {
4620 uint32_t host_feat =
4621 x86_cpu_get_supported_feature_word(w, false);
4622 uint32_t requested_features = env->features[w];
4623 env->features[w] &= host_feat;
4624 cpu->filtered_features[w] = requested_features & ~env->features[w];
4625 if (cpu->filtered_features[w]) {
4626 rv = 1;
4627 }
4628 }
4629
4630 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
4631 kvm_enabled()) {
4632 KVMState *s = CPU(cpu)->kvm_state;
4633 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
4634 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
4635 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
4636 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
4637 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
4638
4639 if (!eax_0 ||
4640 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
4641 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
4642 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
4643 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
4644 INTEL_PT_ADDR_RANGES_NUM) ||
4645 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
4646 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
4647 (ecx_0 & INTEL_PT_IP_LIP)) {
4648 /*
4649 * Processor Trace capabilities aren't configurable, so if the
4650 * host can't emulate the capabilities we report on
4651 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
4652 */
4653 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
4654 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
4655 rv = 1;
4656 }
4657 }
4658
4659 return rv;
4660 }
4661
4662 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4663 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4664 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4665 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4666 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4667 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
4668 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4669 {
4670 CPUState *cs = CPU(dev);
4671 X86CPU *cpu = X86_CPU(dev);
4672 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4673 CPUX86State *env = &cpu->env;
4674 Error *local_err = NULL;
4675 static bool ht_warned;
4676
4677 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4678 char *name = x86_cpu_class_get_model_name(xcc);
4679 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4680 g_free(name);
4681 goto out;
4682 }
4683
4684 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4685 error_setg(errp, "apic-id property was not initialized properly");
4686 return;
4687 }
4688
4689 x86_cpu_expand_features(cpu, &local_err);
4690 if (local_err) {
4691 goto out;
4692 }
4693
4694 if (x86_cpu_filter_features(cpu) &&
4695 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4696 x86_cpu_report_filtered_features(cpu);
4697 if (cpu->enforce_cpuid) {
4698 error_setg(&local_err,
4699 accel_uses_host_cpuid() ?
4700 "Host doesn't support requested features" :
4701 "TCG doesn't support requested features");
4702 goto out;
4703 }
4704 }
4705
4706 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4707 * CPUID[1].EDX.
4708 */
4709 if (IS_AMD_CPU(env)) {
4710 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4711 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4712 & CPUID_EXT2_AMD_ALIASES);
4713 }
4714
4715 /* For 64-bit systems, think about the number of physical bits to present.
4716 * Ideally this should be the same as the host; anything other than matching
4717 * the host can cause incorrect guest behaviour.
4718 * QEMU used to pick the magic value of 40 bits, which corresponds to
4719 * consumer AMD devices but nothing else.
4720 */
4721 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4722 if (accel_uses_host_cpuid()) {
4723 uint32_t host_phys_bits = x86_host_phys_bits();
4724 static bool warned;
4725
4726 if (cpu->host_phys_bits) {
4727 /* The user asked for us to use the host physical bits */
4728 cpu->phys_bits = host_phys_bits;
4729 }
4730
4731 /* Print a warning if the user set it to a value that's not the
4732 * host value.
4733 */
4734 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4735 !warned) {
4736 warn_report("Host physical bits (%u)"
4737 " does not match phys-bits property (%u)",
4738 host_phys_bits, cpu->phys_bits);
4739 warned = true;
4740 }
4741
4742 if (cpu->phys_bits &&
4743 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4744 cpu->phys_bits < 32)) {
4745 error_setg(errp, "phys-bits should be between 32 and %u "
4746 " (but is %u)",
4747 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
4748 return;
4749 }
4750 } else {
4751 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4752 error_setg(errp, "TCG only supports phys-bits=%u",
4753 TCG_PHYS_ADDR_BITS);
4754 return;
4755 }
4756 }
4757 /* 0 means it was not explicitly set by the user (or by machine
4758 * compat_props or by the host code above). In this case, the default
4759 * is the value used by TCG (40).
4760 */
4761 if (cpu->phys_bits == 0) {
4762 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4763 }
4764 } else {
4765 /* For 32 bit systems don't use the user set value, but keep
4766 * phys_bits consistent with what we tell the guest.
4767 */
4768 if (cpu->phys_bits != 0) {
4769 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
4770 return;
4771 }
4772
4773 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4774 cpu->phys_bits = 36;
4775 } else {
4776 cpu->phys_bits = 32;
4777 }
4778 }
4779
4780 /* Cache information initialization */
4781 if (!cpu->legacy_cache) {
4782 if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
4783 char *name = x86_cpu_class_get_model_name(xcc);
4784 error_setg(errp,
4785 "CPU model '%s' doesn't support legacy-cache=off", name);
4786 g_free(name);
4787 return;
4788 }
4789 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
4790 *xcc->cpu_def->cache_info;
4791 } else {
4792 /* Build legacy cache information */
4793 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
4794 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
4795 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
4796 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
4797
4798 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
4799 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
4800 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
4801 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
4802
4803 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
4804 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
4805 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
4806 env->cache_info_amd.l3_cache = &legacy_l3_cache;
4807 }
4808
4809
4810 cpu_exec_realizefn(cs, &local_err);
4811 if (local_err != NULL) {
4812 error_propagate(errp, local_err);
4813 return;
4814 }
4815
4816 #ifndef CONFIG_USER_ONLY
4817 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
4818
4819 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4820 x86_cpu_apic_create(cpu, &local_err);
4821 if (local_err != NULL) {
4822 goto out;
4823 }
4824 }
4825 #endif
4826
4827 mce_init(cpu);
4828
4829 #ifndef CONFIG_USER_ONLY
4830 if (tcg_enabled()) {
4831 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4832 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4833
4834 /* Outer container... */
4835 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4836 memory_region_set_enabled(cpu->cpu_as_root, true);
4837
4838 /* ... with two regions inside: normal system memory with low
4839 * priority, and...
4840 */
4841 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4842 get_system_memory(), 0, ~0ull);
4843 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4844 memory_region_set_enabled(cpu->cpu_as_mem, true);
4845
4846 cs->num_ases = 2;
4847 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4848 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4849
4850 /* ... SMRAM with higher priority, linked from /machine/smram. */
4851 cpu->machine_done.notify = x86_cpu_machine_done;
4852 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4853 }
4854 #endif
4855
4856 qemu_init_vcpu(cs);
4857
4858 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
4859 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
4860 * based on inputs (sockets,cores,threads), it is still better to give
4861 * users a warning.
4862 *
4863 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
4864 * cs->nr_threads hasn't been populated yet and the check would be incorrect.
4865 */
4866 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
4867 error_report("AMD CPU doesn't support hyperthreading. Please configure"
4868 " -smp options properly.");
4869 ht_warned = true;
4870 }
4871
4872 x86_cpu_apic_realize(cpu, &local_err);
4873 if (local_err != NULL) {
4874 goto out;
4875 }
4876 cpu_reset(cs);
4877
4878 xcc->parent_realize(dev, &local_err);
4879
4880 out:
4881 if (local_err != NULL) {
4882 error_propagate(errp, local_err);
4883 return;
4884 }
4885 }
4886
4887 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
4888 {
4889 X86CPU *cpu = X86_CPU(dev);
4890 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4891 Error *local_err = NULL;
4892
4893 #ifndef CONFIG_USER_ONLY
4894 cpu_remove_sync(CPU(dev));
4895 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
4896 #endif
4897
4898 if (cpu->apic_state) {
4899 object_unparent(OBJECT(cpu->apic_state));
4900 cpu->apic_state = NULL;
4901 }
4902
4903 xcc->parent_unrealize(dev, &local_err);
4904 if (local_err != NULL) {
4905 error_propagate(errp, local_err);
4906 return;
4907 }
4908 }
4909
4910 typedef struct BitProperty {
4911 FeatureWord w;
4912 uint32_t mask;
4913 } BitProperty;
4914
4915 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
4916 void *opaque, Error **errp)
4917 {
4918 X86CPU *cpu = X86_CPU(obj);
4919 BitProperty *fp = opaque;
4920 uint32_t f = cpu->env.features[fp->w];
4921 bool value = (f & fp->mask) == fp->mask;
4922 visit_type_bool(v, name, &value, errp);
4923 }
4924
4925 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
4926 void *opaque, Error **errp)
4927 {
4928 DeviceState *dev = DEVICE(obj);
4929 X86CPU *cpu = X86_CPU(obj);
4930 BitProperty *fp = opaque;
4931 Error *local_err = NULL;
4932 bool value;
4933
4934 if (dev->realized) {
4935 qdev_prop_set_after_realize(dev, name, errp);
4936 return;
4937 }
4938
4939 visit_type_bool(v, name, &value, &local_err);
4940 if (local_err) {
4941 error_propagate(errp, local_err);
4942 return;
4943 }
4944
4945 if (value) {
4946 cpu->env.features[fp->w] |= fp->mask;
4947 } else {
4948 cpu->env.features[fp->w] &= ~fp->mask;
4949 }
4950 cpu->env.user_features[fp->w] |= fp->mask;
4951 }
4952
4953 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
4954 void *opaque)
4955 {
4956 BitProperty *prop = opaque;
4957 g_free(prop);
4958 }
4959
4960 /* Register a boolean property to get/set a single bit in a uint32_t field.
4961 *
4962 * The same property name can be registered multiple times to make it affect
4963 * multiple bits in the same FeatureWord. In that case, the getter will return
4964 * true only if all bits are set.
4965 */
4966 static void x86_cpu_register_bit_prop(X86CPU *cpu,
4967 const char *prop_name,
4968 FeatureWord w,
4969 int bitnr)
4970 {
4971 BitProperty *fp;
4972 ObjectProperty *op;
4973 uint32_t mask = (1UL << bitnr);
4974
4975 op = object_property_find(OBJECT(cpu), prop_name, NULL);
4976 if (op) {
4977 fp = op->opaque;
4978 assert(fp->w == w);
4979 fp->mask |= mask;
4980 } else {
4981 fp = g_new0(BitProperty, 1);
4982 fp->w = w;
4983 fp->mask = mask;
4984 object_property_add(OBJECT(cpu), prop_name, "bool",
4985 x86_cpu_get_bit_prop,
4986 x86_cpu_set_bit_prop,
4987 x86_cpu_release_bit_prop, fp, &error_abort);
4988 }
4989 }
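/*
 * Illustrative note (not part of the original source): registering e.g.
 * "sse4.1" for FEAT_1_ECX lets "-cpu qemu64,sse4.1=on" flip that bit via the
 * setter above; if the same name were registered for two bits of one word,
 * the getter would report "true" only once both bits are set.
 */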
4990
4991 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
4992 FeatureWord w,
4993 int bitnr)
4994 {
4995 FeatureWordInfo *fi = &feature_word_info[w];
4996 const char *name = fi->feat_names[bitnr];
4997
4998 if (!name) {
4999 return;
5000 }
5001
5002 /* Property names should use "-" instead of "_".
5003 * Old names containing underscores are registered as aliases
5004 * using object_property_add_alias()
5005 */
5006 assert(!strchr(name, '_'));
5007 /* Aliases don't use "|" delimiters anymore; they are registered
5008 * manually using object_property_add_alias(). */
5009 assert(!strchr(name, '|'));
5010 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5011 }
5012
5013 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5014 {
5015 X86CPU *cpu = X86_CPU(cs);
5016 CPUX86State *env = &cpu->env;
5017 GuestPanicInformation *panic_info = NULL;
5018
5019 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5020 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5021
5022 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
5023
5024 assert(HV_CRASH_PARAMS >= 5);
5025 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5026 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5027 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5028 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5029 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5030 }
5031
5032 return panic_info;
5033 }
5034 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5035 const char *name, void *opaque,
5036 Error **errp)
5037 {
5038 CPUState *cs = CPU(obj);
5039 GuestPanicInformation *panic_info;
5040
5041 if (!cs->crash_occurred) {
5042 error_setg(errp, "No crash occurred");
5043 return;
5044 }
5045
5046 panic_info = x86_cpu_get_crash_info(cs);
5047 if (panic_info == NULL) {
5048 error_setg(errp, "No crash information");
5049 return;
5050 }
5051
5052 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5053 errp);
5054 qapi_free_GuestPanicInformation(panic_info);
5055 }
5056
5057 static void x86_cpu_initfn(Object *obj)
5058 {
5059 CPUState *cs = CPU(obj);
5060 X86CPU *cpu = X86_CPU(obj);
5061 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
5062 CPUX86State *env = &cpu->env;
5063 FeatureWord w;
5064
5065 cs->env_ptr = env;
5066
5067 object_property_add(obj, "family", "int",
5068 x86_cpuid_version_get_family,
5069 x86_cpuid_version_set_family, NULL, NULL, NULL);
5070 object_property_add(obj, "model", "int",
5071 x86_cpuid_version_get_model,
5072 x86_cpuid_version_set_model, NULL, NULL, NULL);
5073 object_property_add(obj, "stepping", "int",
5074 x86_cpuid_version_get_stepping,
5075 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
5076 object_property_add_str(obj, "vendor",
5077 x86_cpuid_get_vendor,
5078 x86_cpuid_set_vendor, NULL);
5079 object_property_add_str(obj, "model-id",
5080 x86_cpuid_get_model_id,
5081 x86_cpuid_set_model_id, NULL);
5082 object_property_add(obj, "tsc-frequency", "int",
5083 x86_cpuid_get_tsc_freq,
5084 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
5085 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
5086 x86_cpu_get_feature_words,
5087 NULL, NULL, (void *)env->features, NULL);
5088 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
5089 x86_cpu_get_feature_words,
5090 NULL, NULL, (void *)cpu->filtered_features, NULL);
5091
5092 object_property_add(obj, "crash-information", "GuestPanicInformation",
5093 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
5094
5095 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
5096
5097 for (w = 0; w < FEATURE_WORDS; w++) {
5098 int bitnr;
5099
5100 for (bitnr = 0; bitnr < 32; bitnr++) {
5101 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
5102 }
5103 }
5104
5105 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
5106 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
5107 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
5108 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
5109 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
5110 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
5111 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
5112
5113 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
5114 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
5115 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
5116 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
5117 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
5118 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
5119 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
5120 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
5121 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
5122 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
5123 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
5124 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
5125 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
5126 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
5127 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
5128 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
5129 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
5130 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
5131 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
5132 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
5133 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
5134
5135 if (xcc->cpu_def) {
5136 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
5137 }
5138 }
5139
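/* On x86 the architecture-specific CPU identifier is the APIC ID. */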
5140 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5141 {
5142 X86CPU *cpu = X86_CPU(cs);
5143
5144 return cpu->apic_id;
5145 }
5146
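/* Paging is enabled whenever CR0.PG is set. */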
5147 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5148 {
5149 X86CPU *cpu = X86_CPU(cs);
5150
5151 return cpu->env.cr[0] & CR0_PG_MASK;
5152 }
5153
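/* Set the program counter: EIP is written directly, the CS base is not
 * taken into account here.
 */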
5154 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5155 {
5156 X86CPU *cpu = X86_CPU(cs);
5157
5158 cpu->env.eip = value;
5159 }
5160
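/* tb->pc is the linear address of the TB start (CS base + EIP), so
 * subtract the CS base to recover EIP.
 */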
5161 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5162 {
5163 X86CPU *cpu = X86_CPU(cs);
5164
5165 cpu->env.eip = tb->pc - tb->cs_base;
5166 }
5167
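/* The CPU has work if a maskable external interrupt is pending and IF is
 * set, if an NMI/INIT/SIPI/MCE is pending (these ignore IF), or if an SMI
 * is pending and the CPU is not already in SMM.
 */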
5168 static bool x86_cpu_has_work(CPUState *cs)
5169 {
5170 X86CPU *cpu = X86_CPU(cs);
5171 CPUX86State *env = &cpu->env;
5172
5173 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
5174 CPU_INTERRUPT_POLL)) &&
5175 (env->eflags & IF_MASK)) ||
5176 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
5177 CPU_INTERRUPT_INIT |
5178 CPU_INTERRUPT_SIPI |
5179 CPU_INTERRUPT_MCE)) ||
5180 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
5181 !(env->hflags & HF_SMM_MASK));
5182 }
5183
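/* Select the disassembler mode (16/32/64-bit) from the current CS flags,
 * for both the legacy disassembler and capstone.
 */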
5184 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5185 {
5186 X86CPU *cpu = X86_CPU(cs);
5187 CPUX86State *env = &cpu->env;
5188
5189 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5190 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5191 : bfd_mach_i386_i8086);
5192 info->print_insn = print_insn_i386;
5193
5194 info->cap_arch = CS_ARCH_X86;
5195 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5196 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5197 : CS_MODE_16);
5198 info->cap_insn_unit = 1;
5199 info->cap_insn_split = 8;
5200 }
5201
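/* Recompute the hflags bits that are derived from CR0/CR4/EFER, EFLAGS
 * and the cached segment descriptors; all other hflags bits (those
 * covered by HFLAG_COPY_MASK) are preserved as-is.
 */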
5202 void x86_update_hflags(CPUX86State *env)
5203 {
5204 uint32_t hflags;
5205 #define HFLAG_COPY_MASK \
5206 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
5207 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
5208 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
5209 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
5210
5211 hflags = env->hflags & HFLAG_COPY_MASK;
5212 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
5213 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
5214 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
5215 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
5216 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
5217
5218 if (env->cr[4] & CR4_OSFXSR_MASK) {
5219 hflags |= HF_OSFXSR_MASK;
5220 }
5221
5222 if (env->efer & MSR_EFER_LMA) {
5223 hflags |= HF_LMA_MASK;
5224 }
5225
5226 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
5227 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
5228 } else {
5229 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
5230 (DESC_B_SHIFT - HF_CS32_SHIFT);
5231 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
5232 (DESC_B_SHIFT - HF_SS32_SHIFT);
5233 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
5234 !(hflags & HF_CS32_MASK)) {
5235 hflags |= HF_ADDSEG_MASK;
5236 } else {
5237 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
5238 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
5239 }
5240 }
5241 env->hflags = hflags;
5242 }
5243
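/* qdev properties common to all X86CPU subclasses.  The values given
 * here are only defaults and may be overridden by the user or by
 * machine-type compatibility settings.
 */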
5244 static Property x86_cpu_properties[] = {
5245 #ifdef CONFIG_USER_ONLY
5246 /* apic_id = 0 by default for *-user, see commit 9886e834 */
5247 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
5248 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
5249 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
5250 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
5251 #else
5252 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
5253 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
5254 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
5255 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
5256 #endif
5257 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
5258 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
5259 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
5260 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
5261 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
5262 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
5263 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
5264 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
5265 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
5266 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
5267 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
5268 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
5269 DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
5270 DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
5271 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
5272 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
5273 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
5274 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
5275 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
5276 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
5277 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
5278 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
5279 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
5280 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
5281 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
5282 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
5283 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
5284 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
5285 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
5286 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
5287 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
5288 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
5289 false),
5290 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
5291 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
5292 /*
5293 * legacy_cache defaults to true unless the CPU model provides its
5294 * own cache information (see x86_cpu_load_def()).
5295 */
5296 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
5297
5298 /*
5299 * From "Requirements for Implementing the Microsoft
5300 * Hypervisor Interface":
5301 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
5302 *
5303 * "Starting with Windows Server 2012 and Windows 8, if
5304 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
5305 * the hypervisor imposes no specific limit to the number of VPs.
5306 * In this case, Windows Server 2012 guest VMs may use more than
5307 * 64 VPs, up to the maximum supported number of processors applicable
5308 * to the specific Windows version being used."
5309 */
5310 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
5311 DEFINE_PROP_END_OF_LIST()
5312 };
5313
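/* Class init for the abstract TYPE_X86_CPU type: wires up the DeviceClass
 * and CPUClass hooks that are shared by every x86 CPU model.
 */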
5314 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
5315 {
5316 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5317 CPUClass *cc = CPU_CLASS(oc);
5318 DeviceClass *dc = DEVICE_CLASS(oc);
5319
5320 device_class_set_parent_realize(dc, x86_cpu_realizefn,
5321 &xcc->parent_realize);
5322 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
5323 &xcc->parent_unrealize);
5324 dc->props = x86_cpu_properties;
5325
5326 xcc->parent_reset = cc->reset;
5327 cc->reset = x86_cpu_reset;
5328 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
5329
5330 cc->class_by_name = x86_cpu_class_by_name;
5331 cc->parse_features = x86_cpu_parse_featurestr;
5332 cc->has_work = x86_cpu_has_work;
5333 #ifdef CONFIG_TCG
5334 cc->do_interrupt = x86_cpu_do_interrupt;
5335 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
5336 #endif
5337 cc->dump_state = x86_cpu_dump_state;
5338 cc->get_crash_info = x86_cpu_get_crash_info;
5339 cc->set_pc = x86_cpu_set_pc;
5340 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
5341 cc->gdb_read_register = x86_cpu_gdb_read_register;
5342 cc->gdb_write_register = x86_cpu_gdb_write_register;
5343 cc->get_arch_id = x86_cpu_get_arch_id;
5344 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
5345 #ifdef CONFIG_USER_ONLY
5346 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
5347 #else
5348 cc->asidx_from_attrs = x86_asidx_from_attrs;
5349 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
5350 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
5351 cc->write_elf64_note = x86_cpu_write_elf64_note;
5352 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
5353 cc->write_elf32_note = x86_cpu_write_elf32_note;
5354 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
5355 cc->vmsd = &vmstate_x86_cpu;
5356 #endif
5357 cc->gdb_arch_name = x86_gdb_arch_name;
5358 #ifdef TARGET_X86_64
5359 cc->gdb_core_xml_file = "i386-64bit.xml";
5360 cc->gdb_num_core_regs = 57;
5361 #else
5362 cc->gdb_core_xml_file = "i386-32bit.xml";
5363 cc->gdb_num_core_regs = 41;
5364 #endif
5365 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
5366 cc->debug_excp_handler = breakpoint_handler;
5367 #endif
5368 cc->cpu_exec_enter = x86_cpu_exec_enter;
5369 cc->cpu_exec_exit = x86_cpu_exec_exit;
5370 #ifdef CONFIG_TCG
5371 cc->tcg_initialize = tcg_x86_init;
5372 #endif
5373 cc->disas_set_info = x86_disas_set_info;
5374
5375 dc->user_creatable = true;
5376 }
5377
5378 static const TypeInfo x86_cpu_type_info = {
5379 .name = TYPE_X86_CPU,
5380 .parent = TYPE_CPU,
5381 .instance_size = sizeof(X86CPU),
5382 .instance_init = x86_cpu_initfn,
5383 .abstract = true,
5384 .class_size = sizeof(X86CPUClass),
5385 .class_init = x86_cpu_common_class_init,
5386 };
5387
5388
5389 /* "base" CPU model, used by query-cpu-model-expansion */
5390 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
5391 {
5392 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5393
5394 xcc->static_model = true;
5395 xcc->migration_safe = true;
5396 xcc->model_description = "base CPU model type with no features enabled";
5397 xcc->ordering = 8;
5398 }
5399
5400 static const TypeInfo x86_base_cpu_type_info = {
5401 .name = X86_CPU_TYPE_NAME("base"),
5402 .parent = TYPE_X86_CPU,
5403 .class_init = x86_cpu_base_class_init,
5404 };
5405
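/* Register the abstract base type, every built-in CPU model, the special
 * "max" and "base" models, and (when KVM or HVF is compiled in) "host".
 */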
5406 static void x86_cpu_register_types(void)
5407 {
5408 int i;
5409
5410 type_register_static(&x86_cpu_type_info);
5411 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5412 x86_register_cpudef_type(&builtin_x86_defs[i]);
5413 }
5414 type_register_static(&max_x86_cpu_type_info);
5415 type_register_static(&x86_base_cpu_type_info);
5416 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5417 type_register_static(&host_x86_cpu_type_info);
5418 #endif
5419 }
5420
5421 type_init(x86_cpu_register_types)