]> git.proxmox.com Git - mirror_qemu.git/blame_incremental - target/i386/cpu.c
i386/kvm: hv-evmcs requires hv-vapic
[mirror_qemu.git] / target / i386 / cpu.c
... / ...
CommitLineData
1/*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "qemu/osdep.h"
21#include "qemu/units.h"
22#include "qemu/cutils.h"
23#include "qemu/bitops.h"
24#include "qemu/qemu-print.h"
25
26#include "cpu.h"
27#include "exec/exec-all.h"
28#include "sysemu/kvm.h"
29#include "sysemu/hvf.h"
30#include "sysemu/cpus.h"
31#include "kvm_i386.h"
32#include "sev_i386.h"
33
34#include "qemu/error-report.h"
35#include "qemu/module.h"
36#include "qemu/option.h"
37#include "qemu/config-file.h"
38#include "qapi/error.h"
39#include "qapi/qapi-visit-misc.h"
40#include "qapi/qapi-visit-run-state.h"
41#include "qapi/qmp/qdict.h"
42#include "qapi/qmp/qerror.h"
43#include "qapi/visitor.h"
44#include "qom/qom-qobject.h"
45#include "sysemu/arch_init.h"
46#include "qapi/qapi-commands-target.h"
47
48#include "standard-headers/asm-x86/kvm_para.h"
49
50#include "sysemu/sysemu.h"
51#include "sysemu/tcg.h"
52#include "hw/qdev-properties.h"
53#include "hw/i386/topology.h"
54#ifndef CONFIG_USER_ONLY
55#include "exec/address-spaces.h"
56#include "hw/hw.h"
57#include "hw/xen/xen.h"
58#include "hw/i386/apic_internal.h"
59#endif
60
61#include "disas/capstone.h"
62
63/* Helpers for building CPUID[2] descriptors: */
64
/*
 * Geometry of one known CPUID leaf 2 cache descriptor.
 * cpuid2_cache_descriptor() matches a CPUCacheInfo against entries of
 * this type to pick the 1-byte descriptor code (the array index).
 */
struct CPUID2CacheDescriptorInfo {
    enum CacheType type;  /* data, instruction, or unified cache */
    int level;            /* cache level: 1, 2 or 3 */
    int size;             /* total cache size in bytes */
    int line_size;        /* cache line size in bytes */
    int associativity;    /* number of ways */
};
72
73/*
74 * Known CPUID 2 cache descriptors.
75 * From Intel SDM Volume 2A, CPUID instruction
76 */
77struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
78 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
79 .associativity = 4, .line_size = 32, },
80 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
81 .associativity = 4, .line_size = 32, },
82 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
83 .associativity = 4, .line_size = 64, },
84 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
85 .associativity = 2, .line_size = 32, },
86 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
87 .associativity = 4, .line_size = 32, },
88 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
89 .associativity = 4, .line_size = 64, },
90 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
91 .associativity = 6, .line_size = 64, },
92 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
93 .associativity = 2, .line_size = 64, },
94 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
95 .associativity = 8, .line_size = 64, },
96 /* lines per sector is not supported cpuid2_cache_descriptor(),
97 * so descriptors 0x22, 0x23 are not included
98 */
99 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
100 .associativity = 16, .line_size = 64, },
101 /* lines per sector is not supported cpuid2_cache_descriptor(),
102 * so descriptors 0x25, 0x20 are not included
103 */
104 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
105 .associativity = 8, .line_size = 64, },
106 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
107 .associativity = 8, .line_size = 64, },
108 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
109 .associativity = 4, .line_size = 32, },
110 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
111 .associativity = 4, .line_size = 32, },
112 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
113 .associativity = 4, .line_size = 32, },
114 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
115 .associativity = 4, .line_size = 32, },
116 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
117 .associativity = 4, .line_size = 32, },
118 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
119 .associativity = 4, .line_size = 64, },
120 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
121 .associativity = 8, .line_size = 64, },
122 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
123 .associativity = 12, .line_size = 64, },
124 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
125 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
126 .associativity = 12, .line_size = 64, },
127 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
128 .associativity = 16, .line_size = 64, },
129 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
130 .associativity = 12, .line_size = 64, },
131 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
132 .associativity = 16, .line_size = 64, },
133 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
134 .associativity = 24, .line_size = 64, },
135 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
136 .associativity = 8, .line_size = 64, },
137 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
138 .associativity = 4, .line_size = 64, },
139 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
140 .associativity = 4, .line_size = 64, },
141 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
142 .associativity = 4, .line_size = 64, },
143 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
144 .associativity = 4, .line_size = 64, },
145 /* lines per sector is not supported cpuid2_cache_descriptor(),
146 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
147 */
148 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
149 .associativity = 8, .line_size = 64, },
150 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
151 .associativity = 2, .line_size = 64, },
152 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
153 .associativity = 8, .line_size = 64, },
154 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
155 .associativity = 8, .line_size = 32, },
156 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
157 .associativity = 8, .line_size = 32, },
158 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
159 .associativity = 8, .line_size = 32, },
160 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
161 .associativity = 8, .line_size = 32, },
162 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
163 .associativity = 4, .line_size = 64, },
164 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
165 .associativity = 8, .line_size = 64, },
166 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
167 .associativity = 4, .line_size = 64, },
168 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
169 .associativity = 4, .line_size = 64, },
170 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
171 .associativity = 4, .line_size = 64, },
172 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
173 .associativity = 8, .line_size = 64, },
174 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
175 .associativity = 8, .line_size = 64, },
176 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
177 .associativity = 8, .line_size = 64, },
178 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
179 .associativity = 12, .line_size = 64, },
180 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
181 .associativity = 12, .line_size = 64, },
182 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
183 .associativity = 12, .line_size = 64, },
184 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
185 .associativity = 16, .line_size = 64, },
186 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
187 .associativity = 16, .line_size = 64, },
188 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
189 .associativity = 16, .line_size = 64, },
190 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
191 .associativity = 24, .line_size = 64, },
192 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
193 .associativity = 24, .line_size = 64, },
194 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
195 .associativity = 24, .line_size = 64, },
196};
197
198/*
199 * "CPUID leaf 2 does not report cache descriptor information,
200 * use CPUID leaf 4 to query cache parameters"
201 */
202#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
203
204/*
205 * Return a CPUID 2 cache descriptor for a given cache.
206 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
207 */
208static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
209{
210 int i;
211
212 assert(cache->size > 0);
213 assert(cache->level > 0);
214 assert(cache->line_size > 0);
215 assert(cache->associativity > 0);
216 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
217 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
218 if (d->level == cache->level && d->type == cache->type &&
219 d->size == cache->size && d->line_size == cache->line_size &&
220 d->associativity == cache->associativity) {
221 return i;
222 }
223 }
224
225 return CACHE_DESCRIPTOR_UNAVAILABLE;
226}
227
228/* CPUID Leaf 4 constants: */
229
230/* EAX: */
231#define CACHE_TYPE_D 1
232#define CACHE_TYPE_I 2
233#define CACHE_TYPE_UNIFIED 3
234
235#define CACHE_LEVEL(l) (l << 5)
236
237#define CACHE_SELF_INIT_LEVEL (1 << 8)
238
239/* EDX: */
240#define CACHE_NO_INVD_SHARING (1 << 0)
241#define CACHE_INCLUSIVE (1 << 1)
242#define CACHE_COMPLEX_IDX (1 << 2)
243
244/* Encode CacheType for CPUID[4].EAX */
245#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
246 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
247 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
248 0 /* Invalid value */)
249
250
251/* Encode cache info for CPUID[4] */
252static void encode_cache_cpuid4(CPUCacheInfo *cache,
253 int num_apic_ids, int num_cores,
254 uint32_t *eax, uint32_t *ebx,
255 uint32_t *ecx, uint32_t *edx)
256{
257 assert(cache->size == cache->line_size * cache->associativity *
258 cache->partitions * cache->sets);
259
260 assert(num_apic_ids > 0);
261 *eax = CACHE_TYPE(cache->type) |
262 CACHE_LEVEL(cache->level) |
263 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
264 ((num_cores - 1) << 26) |
265 ((num_apic_ids - 1) << 14);
266
267 assert(cache->line_size > 0);
268 assert(cache->partitions > 0);
269 assert(cache->associativity > 0);
270 /* We don't implement fully-associative caches */
271 assert(cache->associativity < cache->sets);
272 *ebx = (cache->line_size - 1) |
273 ((cache->partitions - 1) << 12) |
274 ((cache->associativity - 1) << 22);
275
276 assert(cache->sets > 0);
277 *ecx = cache->sets - 1;
278
279 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
280 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
281 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
282}
283
284/* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
285static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
286{
287 assert(cache->size % 1024 == 0);
288 assert(cache->lines_per_tag > 0);
289 assert(cache->associativity > 0);
290 assert(cache->line_size > 0);
291 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
292 (cache->lines_per_tag << 8) | (cache->line_size);
293}
294
#define ASSOC_FULL 0xFF

/*
 * AMD associativity encoding used on CPUID Leaf 0x80000006.
 * Unknown/unsupported way counts encode as 0 (invalid).
 * Note: every use of the argument is parenthesized so that expression
 * arguments (e.g. AMD_ENC_ASSOC(x | y)) parse correctly.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a)   : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
310
311/*
312 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
313 * @l3 can be NULL.
314 */
315static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
316 CPUCacheInfo *l3,
317 uint32_t *ecx, uint32_t *edx)
318{
319 assert(l2->size % 1024 == 0);
320 assert(l2->associativity > 0);
321 assert(l2->lines_per_tag > 0);
322 assert(l2->line_size > 0);
323 *ecx = ((l2->size / 1024) << 16) |
324 (AMD_ENC_ASSOC(l2->associativity) << 12) |
325 (l2->lines_per_tag << 8) | (l2->line_size);
326
327 if (l3) {
328 assert(l3->size % (512 * 1024) == 0);
329 assert(l3->associativity > 0);
330 assert(l3->lines_per_tag > 0);
331 assert(l3->line_size > 0);
332 *edx = ((l3->size / (512 * 1024)) << 18) |
333 (AMD_ENC_ASSOC(l3->associativity) << 12) |
334 (l3->lines_per_tag << 8) | (l3->line_size);
335 } else {
336 *edx = 0;
337 }
338}
339
340/*
341 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
342 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
343 * Define the constants to build the cpu topology. Right now, TOPOEXT
344 * feature is enabled only on EPYC. So, these constants are based on
345 * EPYC supported configurations. We may need to handle the cases if
346 * these values change in future.
347 */
348/* Maximum core complexes in a node */
349#define MAX_CCX 2
350/* Maximum cores in a core complex */
351#define MAX_CORES_IN_CCX 4
352/* Maximum cores in a node */
353#define MAX_CORES_IN_NODE 8
354/* Maximum nodes in a socket */
355#define MAX_NODES_PER_SOCKET 4
356
357/*
358 * Figure out the number of nodes required to build this config.
359 * Max cores in a node is 8
360 */
361static int nodes_in_socket(int nr_cores)
362{
363 int nodes;
364
365 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
366
367 /* Hardware does not support config with 3 nodes, return 4 in that case */
368 return (nodes == 3) ? 4 : nodes;
369}
370
371/*
372 * Decide the number of cores in a core complex with the given nr_cores using
373 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
374 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
375 * L3 cache is shared across all cores in a core complex. So, this will also
376 * tell us how many cores are sharing the L3 cache.
377 */
378static int cores_in_core_complex(int nr_cores)
379{
380 int nodes;
381
382 /* Check if we can fit all the cores in one core complex */
383 if (nr_cores <= MAX_CORES_IN_CCX) {
384 return nr_cores;
385 }
386 /* Get the number of nodes required to build this config */
387 nodes = nodes_in_socket(nr_cores);
388
389 /*
390 * Divide the cores accros all the core complexes
391 * Return rounded up value
392 */
393 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
394}
395
396/* Encode cache info for CPUID[8000001D] */
397static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
398 uint32_t *eax, uint32_t *ebx,
399 uint32_t *ecx, uint32_t *edx)
400{
401 uint32_t l3_cores;
402 assert(cache->size == cache->line_size * cache->associativity *
403 cache->partitions * cache->sets);
404
405 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
406 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
407
408 /* L3 is shared among multiple cores */
409 if (cache->level == 3) {
410 l3_cores = cores_in_core_complex(cs->nr_cores);
411 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
412 } else {
413 *eax |= ((cs->nr_threads - 1) << 14);
414 }
415
416 assert(cache->line_size > 0);
417 assert(cache->partitions > 0);
418 assert(cache->associativity > 0);
419 /* We don't implement fully-associative caches */
420 assert(cache->associativity < cache->sets);
421 *ebx = (cache->line_size - 1) |
422 ((cache->partitions - 1) << 12) |
423 ((cache->associativity - 1) << 22);
424
425 assert(cache->sets > 0);
426 *ecx = cache->sets - 1;
427
428 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
429 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
430 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
431}
432
/*
 * Configuration info for a given core index, filled in by
 * build_core_topology() and consumed by encode_topo_cpuid8000001e().
 */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology
     * This can be 0,1,2,3 with max 4 cores in a core complex
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};
447
448/*
449 * Build the configuration closely match the EPYC hardware. Using the EPYC
450 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
451 * right now. This could change in future.
452 * nr_cores : Total number of cores in the config
453 * core_id : Core index of the current CPU
454 * topo : Data structure to hold all the config info for this core index
455 */
456static void build_core_topology(int nr_cores, int core_id,
457 struct core_topology *topo)
458{
459 int nodes, cores_in_ccx;
460
461 /* First get the number of nodes required */
462 nodes = nodes_in_socket(nr_cores);
463
464 cores_in_ccx = cores_in_core_complex(nr_cores);
465
466 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
467 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
468 topo->core_id = core_id % cores_in_ccx;
469 topo->num_nodes = nodes;
470}
471
/* Encode topology info (APIC id, core/node ids) for CPUID[8000001E] */
static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    struct core_topology topo = {0};
    unsigned long nodes;
    int shift;

    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
    *eax = cpu->apic_id;
    /*
     * CPUID_Fn8000001E_EBX
     * 31:16 Reserved
     * 15:8  Threads per core (The number of threads per core is
     *       Threads per core + 1)
     *  7:0  Core id (see bit decoding below)
     *       SMT:
     *           4:3 node id
     *             2 Core complex id
     *           1:0 Core id
     *       Non SMT:
     *           5:4 node id
     *             3 Core complex id
     *           1:0 Core id
     */
    if (cs->nr_threads - 1) {
        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
                (topo.ccx_id << 2) | topo.core_id;
    } else {
        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
    }
    /*
     * CPUID_Fn8000001E_ECX
     * 31:11 Reserved
     * 10:8  Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0  Node id (see bit decoding below)
     *          2  Socket id
     *        1:0  Node id
     */
    if (topo.num_nodes <= 4) {
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
                topo.node_id;
    } else {
        /*
         * Node id fix up. Actual hardware supports up to 4 nodes. But with
         * more than 32 cores, we may end up with more than 4 nodes.
         * Node id is a combination of socket id and node id. Only requirement
         * here is that this number should be unique across the system.
         * Shift the socket id to accommodate more nodes. We don't expect both
         * socket id and node id to be big numbers at the same time. This is
         * not an ideal config but we need to support it. Max nodes we can
         * have is 32 (255/8) with 8 cores per node and 255 max cores. We
         * only need 5 bits for nodes. Find the left most set bit to
         * represent the total number of nodes. find_last_bit returns last
         * set bit(0 based). Left shift(+1) the socket id to represent all
         * the nodes.
         */
        nodes = topo.num_nodes - 1;
        shift = find_last_bit(&nodes, 8);
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
                topo.node_id;
    }
    *edx = 0;
}
536
537/*
538 * Definitions of the hardcoded cache entries we expose:
539 * These are legacy cache values. If there is a need to change any
540 * of these values please use builtin_x86_defs
541 */
542
/* L1 data cache: 64 sets * 8 ways * 64-byte lines = 32 KiB */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};
555
/* AMD variant: 512 sets * 2 ways * 64-byte lines = 64 KiB */
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};
569
/* L1 instruction cache: 64 sets * 8 ways * 64-byte lines = 32 KiB */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};
582
/* AMD variant: 512 sets * 2 ways * 64-byte lines = 64 KiB */
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};
596
/* Level 2 unified cache: 4096 sets * 16 ways * 64-byte lines = 4 MiB */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,
    .partitions = 1,
    .no_invd_sharing = true,
};
609
/*
 * L2 variant for the CPUID leaf 2 descriptor lookup; only the fields
 * compared by cpuid2_cache_descriptor() are set.
 */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};
618
619
/* AMD L2 variant: 512 sets * 16 ways * 64-byte lines = 512 KiB */
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,
    .partitions = 1,
};
631
/* Level 3 unified cache: 16384 sets * 16 ways * 64-byte lines = 16 MiB */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};
646
647/* TLB definitions: */
648
649#define L1_DTLB_2M_ASSOC 1
650#define L1_DTLB_2M_ENTRIES 255
651#define L1_DTLB_4K_ASSOC 1
652#define L1_DTLB_4K_ENTRIES 255
653
654#define L1_ITLB_2M_ASSOC 1
655#define L1_ITLB_2M_ENTRIES 255
656#define L1_ITLB_4K_ASSOC 1
657#define L1_ITLB_4K_ENTRIES 255
658
659#define L2_DTLB_2M_ASSOC 0 /* disabled */
660#define L2_DTLB_2M_ENTRIES 0 /* disabled */
661#define L2_DTLB_4K_ASSOC 4
662#define L2_DTLB_4K_ENTRIES 512
663
664#define L2_ITLB_2M_ASSOC 0 /* disabled */
665#define L2_ITLB_2M_ENTRIES 0 /* disabled */
666#define L2_ITLB_4K_ASSOC 4
667#define L2_ITLB_4K_ENTRIES 512
668
669/* CPUID Leaf 0x14 constants: */
670#define INTEL_PT_MAX_SUBLEAF 0x1
671/*
672 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
673 * MSR can be accessed;
674 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
675 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
676 * of Intel PT MSRs across warm reset;
677 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
678 */
679#define INTEL_PT_MINIMAL_EBX 0xf
680/*
681 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
682 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
683 * accessed;
684 * bit[01]: ToPA tables can hold any number of output entries, up to the
685 * maximum allowed by the MaskOrTableOffset field of
686 * IA32_RTIT_OUTPUT_MASK_PTRS;
687 * bit[02]: Support Single-Range Output scheme;
688 */
689#define INTEL_PT_MINIMAL_ECX 0x7
690/* generated packets which contain IP payloads have LIP values */
691#define INTEL_PT_IP_LIP (1 << 31)
692#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
693#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
694#define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
695#define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
696#define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
697
698static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
699 uint32_t vendor2, uint32_t vendor3)
700{
701 int i;
702 for (i = 0; i < 4; i++) {
703 dst[i] = vendor1 >> (8 * i);
704 dst[i + 4] = vendor2 >> (8 * i);
705 dst[i + 8] = vendor3 >> (8 * i);
706 }
707 dst[CPUID_VENDOR_SZ] = '\0';
708}
709
710#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
711#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
712 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
713#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
714 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
715 CPUID_PSE36 | CPUID_FXSR)
716#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
717#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
718 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
719 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
720 CPUID_PAE | CPUID_SEP | CPUID_APIC)
721
722#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
723 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
724 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
725 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
726 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
727 /* partly implemented:
728 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
729 /* missing:
730 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
731#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
732 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
733 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
734 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
735 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
736 CPUID_EXT_RDRAND)
737 /* missing:
738 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
739 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
740 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
741 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
742 CPUID_EXT_F16C */
743
744#ifdef TARGET_X86_64
745#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
746#else
747#define TCG_EXT2_X86_64_FEATURES 0
748#endif
749
750#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
751 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
752 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
753 TCG_EXT2_X86_64_FEATURES)
754#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
755 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
756#define TCG_EXT4_FEATURES 0
757#define TCG_SVM_FEATURES CPUID_SVM_NPT
758#define TCG_KVM_FEATURES 0
759#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
760 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
761 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
762 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
763 CPUID_7_0_EBX_ERMS)
764 /* missing:
765 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
766 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
767 CPUID_7_0_EBX_RDSEED */
768#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
769 /* CPUID_7_0_ECX_OSPKE is dynamic */ \
770 CPUID_7_0_ECX_LA57)
771#define TCG_7_0_EDX_FEATURES 0
772#define TCG_APM_FEATURES 0
773#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
774#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
775 /* missing:
776 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
777
/* Discriminator for FeatureWordInfo: where the feature bits come from */
typedef enum FeatureWordType {
    CPUID_FEATURE_WORD,  /* bits come from a CPUID leaf register */
    MSR_FEATURE_WORD,    /* bits come from an MSR */
} FeatureWordType;
782
/* Per-feature-word metadata: names, source (CPUID leaf or MSR), policy */
typedef struct FeatureWordInfo {
    FeatureWordType type;  /* selects which union member below is valid */
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];  /* one name per bit; NULL = unnamed bit */
    union {
        /* If type==CPUID_FEATURE_WORD */
        struct {
            uint32_t eax;   /* Input EAX for CPUID */
            bool needs_ecx; /* CPUID instruction uses ECX as input */
            uint32_t ecx;   /* Input ECX value for CPUID */
            int reg;        /* output register (R_* constant) */
        } cpuid;
        /* If type==MSR_FEATURE_WORD */
        struct {
            uint32_t index;  /* MSR index */
            struct { /*CPUID that enumerate this MSR*/
                FeatureWord cpuid_class;
                uint32_t cpuid_flag;
            } cpuid_dep;
        } msr;
    };
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
} FeatureWordInfo;
814
815static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
816 [FEAT_1_EDX] = {
817 .type = CPUID_FEATURE_WORD,
818 .feat_names = {
819 "fpu", "vme", "de", "pse",
820 "tsc", "msr", "pae", "mce",
821 "cx8", "apic", NULL, "sep",
822 "mtrr", "pge", "mca", "cmov",
823 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
824 NULL, "ds" /* Intel dts */, "acpi", "mmx",
825 "fxsr", "sse", "sse2", "ss",
826 "ht" /* Intel htt */, "tm", "ia64", "pbe",
827 },
828 .cpuid = {.eax = 1, .reg = R_EDX, },
829 .tcg_features = TCG_FEATURES,
830 },
831 [FEAT_1_ECX] = {
832 .type = CPUID_FEATURE_WORD,
833 .feat_names = {
834 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
835 "ds-cpl", "vmx", "smx", "est",
836 "tm2", "ssse3", "cid", NULL,
837 "fma", "cx16", "xtpr", "pdcm",
838 NULL, "pcid", "dca", "sse4.1",
839 "sse4.2", "x2apic", "movbe", "popcnt",
840 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
841 "avx", "f16c", "rdrand", "hypervisor",
842 },
843 .cpuid = { .eax = 1, .reg = R_ECX, },
844 .tcg_features = TCG_EXT_FEATURES,
845 },
846 /* Feature names that are already defined on feature_name[] but
847 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
848 * names on feat_names below. They are copied automatically
849 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
850 */
851 [FEAT_8000_0001_EDX] = {
852 .type = CPUID_FEATURE_WORD,
853 .feat_names = {
854 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
855 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
856 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
857 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
858 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
859 "nx", NULL, "mmxext", NULL /* mmx */,
860 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
861 NULL, "lm", "3dnowext", "3dnow",
862 },
863 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
864 .tcg_features = TCG_EXT2_FEATURES,
865 },
866 [FEAT_8000_0001_ECX] = {
867 .type = CPUID_FEATURE_WORD,
868 .feat_names = {
869 "lahf-lm", "cmp-legacy", "svm", "extapic",
870 "cr8legacy", "abm", "sse4a", "misalignsse",
871 "3dnowprefetch", "osvw", "ibs", "xop",
872 "skinit", "wdt", NULL, "lwp",
873 "fma4", "tce", NULL, "nodeid-msr",
874 NULL, "tbm", "topoext", "perfctr-core",
875 "perfctr-nb", NULL, NULL, NULL,
876 NULL, NULL, NULL, NULL,
877 },
878 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
879 .tcg_features = TCG_EXT3_FEATURES,
880 /*
881 * TOPOEXT is always allowed but can't be enabled blindly by
882 * "-cpu host", as it requires consistent cache topology info
883 * to be provided so it doesn't confuse guests.
884 */
885 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
886 },
887 [FEAT_C000_0001_EDX] = {
888 .type = CPUID_FEATURE_WORD,
889 .feat_names = {
890 NULL, NULL, "xstore", "xstore-en",
891 NULL, NULL, "xcrypt", "xcrypt-en",
892 "ace2", "ace2-en", "phe", "phe-en",
893 "pmm", "pmm-en", NULL, NULL,
894 NULL, NULL, NULL, NULL,
895 NULL, NULL, NULL, NULL,
896 NULL, NULL, NULL, NULL,
897 NULL, NULL, NULL, NULL,
898 },
899 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
900 .tcg_features = TCG_EXT4_FEATURES,
901 },
902 [FEAT_KVM] = {
903 .type = CPUID_FEATURE_WORD,
904 .feat_names = {
905 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
906 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
907 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
908 NULL, NULL, NULL, NULL,
909 NULL, NULL, NULL, NULL,
910 NULL, NULL, NULL, NULL,
911 "kvmclock-stable-bit", NULL, NULL, NULL,
912 NULL, NULL, NULL, NULL,
913 },
914 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
915 .tcg_features = TCG_KVM_FEATURES,
916 },
917 [FEAT_KVM_HINTS] = {
918 .type = CPUID_FEATURE_WORD,
919 .feat_names = {
920 "kvm-hint-dedicated", NULL, NULL, NULL,
921 NULL, NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 NULL, NULL, NULL, NULL,
924 NULL, NULL, NULL, NULL,
925 NULL, NULL, NULL, NULL,
926 NULL, NULL, NULL, NULL,
927 NULL, NULL, NULL, NULL,
928 },
929 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
930 .tcg_features = TCG_KVM_FEATURES,
931 /*
932 * KVM hints aren't auto-enabled by -cpu host, they need to be
933 * explicitly enabled in the command-line.
934 */
935 .no_autoenable_flags = ~0U,
936 },
937 /*
938 * .feat_names are commented out for Hyper-V enlightenments because we
939 * don't want to have two different ways for enabling them on QEMU command
940 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
941 * enabling several feature bits simultaneously, exposing these bits
942 * individually may just confuse guests.
943 */
944 [FEAT_HYPERV_EAX] = {
945 .type = CPUID_FEATURE_WORD,
946 .feat_names = {
947 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
948 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
949 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
950 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
951 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
952 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
953 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
954 NULL, NULL,
955 NULL, NULL, NULL, NULL,
956 NULL, NULL, NULL, NULL,
957 NULL, NULL, NULL, NULL,
958 NULL, NULL, NULL, NULL,
959 },
960 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
961 },
962 [FEAT_HYPERV_EBX] = {
963 .type = CPUID_FEATURE_WORD,
964 .feat_names = {
965 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
966 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
967 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
968 NULL /* hv_create_port */, NULL /* hv_connect_port */,
969 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
970 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
971 NULL, NULL,
972 NULL, NULL, NULL, NULL,
973 NULL, NULL, NULL, NULL,
974 NULL, NULL, NULL, NULL,
975 NULL, NULL, NULL, NULL,
976 },
977 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
978 },
979 [FEAT_HYPERV_EDX] = {
980 .type = CPUID_FEATURE_WORD,
981 .feat_names = {
982 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
983 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
984 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
985 NULL, NULL,
986 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
987 NULL, NULL, NULL, NULL,
988 NULL, NULL, NULL, NULL,
989 NULL, NULL, NULL, NULL,
990 NULL, NULL, NULL, NULL,
991 NULL, NULL, NULL, NULL,
992 },
993 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
994 },
995 [FEAT_HV_RECOMM_EAX] = {
996 .type = CPUID_FEATURE_WORD,
997 .feat_names = {
998 NULL /* hv_recommend_pv_as_switch */,
999 NULL /* hv_recommend_pv_tlbflush_local */,
1000 NULL /* hv_recommend_pv_tlbflush_remote */,
1001 NULL /* hv_recommend_msr_apic_access */,
1002 NULL /* hv_recommend_msr_reset */,
1003 NULL /* hv_recommend_relaxed_timing */,
1004 NULL /* hv_recommend_dma_remapping */,
1005 NULL /* hv_recommend_int_remapping */,
1006 NULL /* hv_recommend_x2apic_msrs */,
1007 NULL /* hv_recommend_autoeoi_deprecation */,
1008 NULL /* hv_recommend_pv_ipi */,
1009 NULL /* hv_recommend_ex_hypercalls */,
1010 NULL /* hv_hypervisor_is_nested */,
1011 NULL /* hv_recommend_int_mbec */,
1012 NULL /* hv_recommend_evmcs */,
1013 NULL,
1014 NULL, NULL, NULL, NULL,
1015 NULL, NULL, NULL, NULL,
1016 NULL, NULL, NULL, NULL,
1017 NULL, NULL, NULL, NULL,
1018 },
1019 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
1020 },
1021 [FEAT_HV_NESTED_EAX] = {
1022 .type = CPUID_FEATURE_WORD,
1023 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
1024 },
1025 [FEAT_SVM] = {
1026 .type = CPUID_FEATURE_WORD,
1027 .feat_names = {
1028 "npt", "lbrv", "svm-lock", "nrip-save",
1029 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1030 NULL, NULL, "pause-filter", NULL,
1031 "pfthreshold", NULL, NULL, NULL,
1032 NULL, NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL,
1034 NULL, NULL, NULL, NULL,
1035 NULL, NULL, NULL, NULL,
1036 },
1037 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
1038 .tcg_features = TCG_SVM_FEATURES,
1039 },
1040 [FEAT_7_0_EBX] = {
1041 .type = CPUID_FEATURE_WORD,
1042 .feat_names = {
1043 "fsgsbase", "tsc-adjust", NULL, "bmi1",
1044 "hle", "avx2", NULL, "smep",
1045 "bmi2", "erms", "invpcid", "rtm",
1046 NULL, NULL, "mpx", NULL,
1047 "avx512f", "avx512dq", "rdseed", "adx",
1048 "smap", "avx512ifma", "pcommit", "clflushopt",
1049 "clwb", "intel-pt", "avx512pf", "avx512er",
1050 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1051 },
1052 .cpuid = {
1053 .eax = 7,
1054 .needs_ecx = true, .ecx = 0,
1055 .reg = R_EBX,
1056 },
1057 .tcg_features = TCG_7_0_EBX_FEATURES,
1058 },
1059 [FEAT_7_0_ECX] = {
1060 .type = CPUID_FEATURE_WORD,
1061 .feat_names = {
1062 NULL, "avx512vbmi", "umip", "pku",
1063 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
1064 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1065 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
1066 "la57", NULL, NULL, NULL,
1067 NULL, NULL, "rdpid", NULL,
1068 NULL, "cldemote", NULL, "movdiri",
1069 "movdir64b", NULL, NULL, NULL,
1070 },
1071 .cpuid = {
1072 .eax = 7,
1073 .needs_ecx = true, .ecx = 0,
1074 .reg = R_ECX,
1075 },
1076 .tcg_features = TCG_7_0_ECX_FEATURES,
1077 },
1078 [FEAT_7_0_EDX] = {
1079 .type = CPUID_FEATURE_WORD,
1080 .feat_names = {
1081 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1082 NULL, NULL, NULL, NULL,
1083 NULL, NULL, "md-clear", NULL,
1084 NULL, NULL, NULL, NULL,
1085 NULL, NULL, NULL, NULL,
1086 NULL, NULL, NULL, NULL,
1087 NULL, NULL, "spec-ctrl", "stibp",
1088 NULL, "arch-capabilities", NULL, "ssbd",
1089 },
1090 .cpuid = {
1091 .eax = 7,
1092 .needs_ecx = true, .ecx = 0,
1093 .reg = R_EDX,
1094 },
1095 .tcg_features = TCG_7_0_EDX_FEATURES,
1096 },
1097 [FEAT_8000_0007_EDX] = {
1098 .type = CPUID_FEATURE_WORD,
1099 .feat_names = {
1100 NULL, NULL, NULL, NULL,
1101 NULL, NULL, NULL, NULL,
1102 "invtsc", NULL, NULL, NULL,
1103 NULL, NULL, NULL, NULL,
1104 NULL, NULL, NULL, NULL,
1105 NULL, NULL, NULL, NULL,
1106 NULL, NULL, NULL, NULL,
1107 NULL, NULL, NULL, NULL,
1108 },
1109 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1110 .tcg_features = TCG_APM_FEATURES,
1111 .unmigratable_flags = CPUID_APM_INVTSC,
1112 },
1113 [FEAT_8000_0008_EBX] = {
1114 .type = CPUID_FEATURE_WORD,
1115 .feat_names = {
1116 NULL, NULL, NULL, NULL,
1117 NULL, NULL, NULL, NULL,
1118 NULL, "wbnoinvd", NULL, NULL,
1119 "ibpb", NULL, NULL, NULL,
1120 NULL, NULL, NULL, NULL,
1121 NULL, NULL, NULL, NULL,
1122 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1123 NULL, NULL, NULL, NULL,
1124 },
1125 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1126 .tcg_features = 0,
1127 .unmigratable_flags = 0,
1128 },
1129 [FEAT_XSAVE] = {
1130 .type = CPUID_FEATURE_WORD,
1131 .feat_names = {
1132 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1133 NULL, NULL, NULL, NULL,
1134 NULL, NULL, NULL, NULL,
1135 NULL, NULL, NULL, NULL,
1136 NULL, NULL, NULL, NULL,
1137 NULL, NULL, NULL, NULL,
1138 NULL, NULL, NULL, NULL,
1139 NULL, NULL, NULL, NULL,
1140 },
1141 .cpuid = {
1142 .eax = 0xd,
1143 .needs_ecx = true, .ecx = 1,
1144 .reg = R_EAX,
1145 },
1146 .tcg_features = TCG_XSAVE_FEATURES,
1147 },
1148 [FEAT_6_EAX] = {
1149 .type = CPUID_FEATURE_WORD,
1150 .feat_names = {
1151 NULL, NULL, "arat", NULL,
1152 NULL, NULL, NULL, NULL,
1153 NULL, NULL, NULL, NULL,
1154 NULL, NULL, NULL, NULL,
1155 NULL, NULL, NULL, NULL,
1156 NULL, NULL, NULL, NULL,
1157 NULL, NULL, NULL, NULL,
1158 NULL, NULL, NULL, NULL,
1159 },
1160 .cpuid = { .eax = 6, .reg = R_EAX, },
1161 .tcg_features = TCG_6_EAX_FEATURES,
1162 },
1163 [FEAT_XSAVE_COMP_LO] = {
1164 .type = CPUID_FEATURE_WORD,
1165 .cpuid = {
1166 .eax = 0xD,
1167 .needs_ecx = true, .ecx = 0,
1168 .reg = R_EAX,
1169 },
1170 .tcg_features = ~0U,
1171 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1172 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1173 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1174 XSTATE_PKRU_MASK,
1175 },
1176 [FEAT_XSAVE_COMP_HI] = {
1177 .type = CPUID_FEATURE_WORD,
1178 .cpuid = {
1179 .eax = 0xD,
1180 .needs_ecx = true, .ecx = 0,
1181 .reg = R_EDX,
1182 },
1183 .tcg_features = ~0U,
1184 },
1185 /*Below are MSR exposed features*/
1186 [FEAT_ARCH_CAPABILITIES] = {
1187 .type = MSR_FEATURE_WORD,
1188 .feat_names = {
1189 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1190 "ssb-no", "mds-no", NULL, NULL,
1191 NULL, NULL, NULL, NULL,
1192 NULL, NULL, NULL, NULL,
1193 NULL, NULL, NULL, NULL,
1194 NULL, NULL, NULL, NULL,
1195 NULL, NULL, NULL, NULL,
1196 NULL, NULL, NULL, NULL,
1197 },
1198 .msr = {
1199 .index = MSR_IA32_ARCH_CAPABILITIES,
1200 .cpuid_dep = {
1201 FEAT_7_0_EDX,
1202 CPUID_7_0_EDX_ARCH_CAPABILITIES
1203 }
1204 },
1205 },
1206};
1207
/*
 * Mapping from a 32-bit x86 register index (R_EAX..R_EDI) to its
 * printable name and the corresponding QAPI enum value.
 */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/* Expand one table entry from a register name token, e.g. REGISTER(EAX) */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
/* Indexed by R_* register number; consumed by get_register_name_32() */
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
1228
/* Description of one XSAVE state component. */
typedef struct ExtSaveArea {
    /* CPUID feature word and bit mask that gate this component */
    uint32_t feature, bits;
    /* Byte offset and length of the component within the XSAVE area */
    uint32_t offset, size;
} ExtSaveArea;

/*
 * XSAVE state components, indexed by their bit number in the XSAVE
 * state-component bitmap (the XSTATE_*_BIT designators below).
 */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
1278
1279static uint32_t xsave_area_size(uint64_t mask)
1280{
1281 int i;
1282 uint64_t ret = 0;
1283
1284 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1285 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1286 if ((mask >> i) & 1) {
1287 ret = MAX(ret, esa->offset + esa->size);
1288 }
1289 }
1290 return ret;
1291}
1292
/* Return true when the active accelerator is KVM or HVF. */
static inline bool accel_uses_host_cpuid(void)
{
    if (kvm_enabled()) {
        return true;
    }
    return hvf_enabled();
}
1297
1298static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1299{
1300 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1301 cpu->env.features[FEAT_XSAVE_COMP_LO];
1302}
1303
1304const char *get_register_name_32(unsigned int reg)
1305{
1306 if (reg >= CPU_NB_REGS32) {
1307 return NULL;
1308 }
1309 return x86_reg_info_32[reg].name;
1310}
1311
1312/*
1313 * Returns the set of feature flags that are supported and migratable by
1314 * QEMU, for a given FeatureWord.
1315 */
1316static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1317{
1318 FeatureWordInfo *wi = &feature_word_info[w];
1319 uint32_t r = 0;
1320 int i;
1321
1322 for (i = 0; i < 32; i++) {
1323 uint32_t f = 1U << i;
1324
1325 /* If the feature name is known, it is implicitly considered migratable,
1326 * unless it is explicitly set in unmigratable_flags */
1327 if ((wi->migratable_flags & f) ||
1328 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1329 r |= f;
1330 }
1331 }
1332 return r;
1333}
1334
/*
 * Execute the CPUID instruction on the host and return its results.
 *
 * @function and @count are loaded into EAX and ECX before CPUID runs.
 * Each of @eax/@ebx/@ecx/@edx may be NULL if the caller does not need
 * that output register.  Aborts when compiled for a non-x86 host.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    /* "0"(function) reuses operand 0 (EAX) as the input leaf number. */
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /*
     * On i386, save/restore all GPRs with pusha/popa and store the CPUID
     * results through %esi instead of using "=b" output constraints —
     * presumably to keep %ebx intact for PIC builds (TODO: confirm).
     */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    /* Copy out only the registers the caller asked for. */
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
1368
/*
 * Read the host CPU's vendor string and family/model/stepping via CPUID.
 *
 * @vendor receives the 12-character vendor string from leaf 0 in
 * EBX, EDX, ECX order (presumably sized CPUID_VENDOR_SZ + 1 by the
 * caller — per x86_cpu_vendor_words2str's contract).
 * @family/@model/@stepping may each be NULL; non-NULL pointers receive
 * the fields decoded from leaf 1 EAX with the extended fields folded in.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        /* base family (bits 11:8) plus extended family (bits 27:20) */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        /* base model (bits 7:4) with extended model (bits 19:16) above it */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        /* stepping ID, bits 3:0 */
        *stepping = eax & 0x0F;
    }
}
1387
1388/* CPU class name definitions: */
1389
/*
 * Compose the QOM type name for CPU model @model_name.
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    char *typename = g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);

    return typename;
}
1397
1398static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1399{
1400 ObjectClass *oc;
1401 char *typename = x86_cpu_type_name(cpu_model);
1402 oc = object_class_by_name(typename);
1403 g_free(typename);
1404 return oc;
1405}
1406
1407static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1408{
1409 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1410 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1411 return g_strndup(class_name,
1412 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1413}
1414
/* Static definition of one built-in CPU model. */
struct X86CPUDefinition {
    /* CPU model name */
    const char *name;
    /* Maximum basic CPUID leaf (CPUID level) */
    uint32_t level;
    /* Maximum extended CPUID leaf (CPUID xlevel) */
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    /* Family, model and stepping as encoded in CPUID leaf 1 EAX */
    int family;
    int model;
    int stepping;
    /* Initial feature bits, indexed by FeatureWord */
    FeatureWordArray features;
    /* Human-readable model-id string */
    const char *model_id;
    /* Optional cache information; left NULL by models that don't set it */
    CPUCaches *cache_info;
};
1428
/*
 * Cache topology for the EPYC CPU model:
 *   L1D: 32 KiB, 8-way, 64 sets;   L1I: 64 KiB, 4-way, 256 sets;
 *   L2:  512 KiB, 8-way, 1024 sets; L3: 8 MiB, 16-way, 8192 sets.
 * All levels use 64-byte lines (sets * associativity * line_size == size).
 */
static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 8192,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};
1478
1479static X86CPUDefinition builtin_x86_defs[] = {
1480 {
1481 .name = "qemu64",
1482 .level = 0xd,
1483 .vendor = CPUID_VENDOR_AMD,
1484 .family = 6,
1485 .model = 6,
1486 .stepping = 3,
1487 .features[FEAT_1_EDX] =
1488 PPRO_FEATURES |
1489 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1490 CPUID_PSE36,
1491 .features[FEAT_1_ECX] =
1492 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1493 .features[FEAT_8000_0001_EDX] =
1494 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1495 .features[FEAT_8000_0001_ECX] =
1496 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1497 .xlevel = 0x8000000A,
1498 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1499 },
1500 {
1501 .name = "phenom",
1502 .level = 5,
1503 .vendor = CPUID_VENDOR_AMD,
1504 .family = 16,
1505 .model = 2,
1506 .stepping = 3,
1507 /* Missing: CPUID_HT */
1508 .features[FEAT_1_EDX] =
1509 PPRO_FEATURES |
1510 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1511 CPUID_PSE36 | CPUID_VME,
1512 .features[FEAT_1_ECX] =
1513 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1514 CPUID_EXT_POPCNT,
1515 .features[FEAT_8000_0001_EDX] =
1516 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1517 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1518 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1519 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1520 CPUID_EXT3_CR8LEG,
1521 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1522 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1523 .features[FEAT_8000_0001_ECX] =
1524 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1525 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1526 /* Missing: CPUID_SVM_LBRV */
1527 .features[FEAT_SVM] =
1528 CPUID_SVM_NPT,
1529 .xlevel = 0x8000001A,
1530 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1531 },
1532 {
1533 .name = "core2duo",
1534 .level = 10,
1535 .vendor = CPUID_VENDOR_INTEL,
1536 .family = 6,
1537 .model = 15,
1538 .stepping = 11,
1539 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1540 .features[FEAT_1_EDX] =
1541 PPRO_FEATURES |
1542 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1543 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1544 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1545 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1546 .features[FEAT_1_ECX] =
1547 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1548 CPUID_EXT_CX16,
1549 .features[FEAT_8000_0001_EDX] =
1550 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1551 .features[FEAT_8000_0001_ECX] =
1552 CPUID_EXT3_LAHF_LM,
1553 .xlevel = 0x80000008,
1554 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1555 },
1556 {
1557 .name = "kvm64",
1558 .level = 0xd,
1559 .vendor = CPUID_VENDOR_INTEL,
1560 .family = 15,
1561 .model = 6,
1562 .stepping = 1,
1563 /* Missing: CPUID_HT */
1564 .features[FEAT_1_EDX] =
1565 PPRO_FEATURES | CPUID_VME |
1566 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1567 CPUID_PSE36,
1568 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1569 .features[FEAT_1_ECX] =
1570 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1571 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1572 .features[FEAT_8000_0001_EDX] =
1573 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1574 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1575 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1576 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1577 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1578 .features[FEAT_8000_0001_ECX] =
1579 0,
1580 .xlevel = 0x80000008,
1581 .model_id = "Common KVM processor"
1582 },
1583 {
1584 .name = "qemu32",
1585 .level = 4,
1586 .vendor = CPUID_VENDOR_INTEL,
1587 .family = 6,
1588 .model = 6,
1589 .stepping = 3,
1590 .features[FEAT_1_EDX] =
1591 PPRO_FEATURES,
1592 .features[FEAT_1_ECX] =
1593 CPUID_EXT_SSE3,
1594 .xlevel = 0x80000004,
1595 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1596 },
1597 {
1598 .name = "kvm32",
1599 .level = 5,
1600 .vendor = CPUID_VENDOR_INTEL,
1601 .family = 15,
1602 .model = 6,
1603 .stepping = 1,
1604 .features[FEAT_1_EDX] =
1605 PPRO_FEATURES | CPUID_VME |
1606 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1607 .features[FEAT_1_ECX] =
1608 CPUID_EXT_SSE3,
1609 .features[FEAT_8000_0001_ECX] =
1610 0,
1611 .xlevel = 0x80000008,
1612 .model_id = "Common 32-bit KVM processor"
1613 },
1614 {
1615 .name = "coreduo",
1616 .level = 10,
1617 .vendor = CPUID_VENDOR_INTEL,
1618 .family = 6,
1619 .model = 14,
1620 .stepping = 8,
1621 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1622 .features[FEAT_1_EDX] =
1623 PPRO_FEATURES | CPUID_VME |
1624 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1625 CPUID_SS,
1626 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1627 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1628 .features[FEAT_1_ECX] =
1629 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1630 .features[FEAT_8000_0001_EDX] =
1631 CPUID_EXT2_NX,
1632 .xlevel = 0x80000008,
1633 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1634 },
1635 {
1636 .name = "486",
1637 .level = 1,
1638 .vendor = CPUID_VENDOR_INTEL,
1639 .family = 4,
1640 .model = 8,
1641 .stepping = 0,
1642 .features[FEAT_1_EDX] =
1643 I486_FEATURES,
1644 .xlevel = 0,
1645 .model_id = "",
1646 },
1647 {
1648 .name = "pentium",
1649 .level = 1,
1650 .vendor = CPUID_VENDOR_INTEL,
1651 .family = 5,
1652 .model = 4,
1653 .stepping = 3,
1654 .features[FEAT_1_EDX] =
1655 PENTIUM_FEATURES,
1656 .xlevel = 0,
1657 .model_id = "",
1658 },
1659 {
1660 .name = "pentium2",
1661 .level = 2,
1662 .vendor = CPUID_VENDOR_INTEL,
1663 .family = 6,
1664 .model = 5,
1665 .stepping = 2,
1666 .features[FEAT_1_EDX] =
1667 PENTIUM2_FEATURES,
1668 .xlevel = 0,
1669 .model_id = "",
1670 },
1671 {
1672 .name = "pentium3",
1673 .level = 3,
1674 .vendor = CPUID_VENDOR_INTEL,
1675 .family = 6,
1676 .model = 7,
1677 .stepping = 3,
1678 .features[FEAT_1_EDX] =
1679 PENTIUM3_FEATURES,
1680 .xlevel = 0,
1681 .model_id = "",
1682 },
1683 {
1684 .name = "athlon",
1685 .level = 2,
1686 .vendor = CPUID_VENDOR_AMD,
1687 .family = 6,
1688 .model = 2,
1689 .stepping = 3,
1690 .features[FEAT_1_EDX] =
1691 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1692 CPUID_MCA,
1693 .features[FEAT_8000_0001_EDX] =
1694 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1695 .xlevel = 0x80000008,
1696 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1697 },
1698 {
1699 .name = "n270",
1700 .level = 10,
1701 .vendor = CPUID_VENDOR_INTEL,
1702 .family = 6,
1703 .model = 28,
1704 .stepping = 2,
1705 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1706 .features[FEAT_1_EDX] =
1707 PPRO_FEATURES |
1708 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1709 CPUID_ACPI | CPUID_SS,
1710 /* Some CPUs got no CPUID_SEP */
1711 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1712 * CPUID_EXT_XTPR */
1713 .features[FEAT_1_ECX] =
1714 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1715 CPUID_EXT_MOVBE,
1716 .features[FEAT_8000_0001_EDX] =
1717 CPUID_EXT2_NX,
1718 .features[FEAT_8000_0001_ECX] =
1719 CPUID_EXT3_LAHF_LM,
1720 .xlevel = 0x80000008,
1721 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1722 },
1723 {
1724 .name = "Conroe",
1725 .level = 10,
1726 .vendor = CPUID_VENDOR_INTEL,
1727 .family = 6,
1728 .model = 15,
1729 .stepping = 3,
1730 .features[FEAT_1_EDX] =
1731 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1732 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1733 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1734 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1735 CPUID_DE | CPUID_FP87,
1736 .features[FEAT_1_ECX] =
1737 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1738 .features[FEAT_8000_0001_EDX] =
1739 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1740 .features[FEAT_8000_0001_ECX] =
1741 CPUID_EXT3_LAHF_LM,
1742 .xlevel = 0x80000008,
1743 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1744 },
1745 {
1746 .name = "Penryn",
1747 .level = 10,
1748 .vendor = CPUID_VENDOR_INTEL,
1749 .family = 6,
1750 .model = 23,
1751 .stepping = 3,
1752 .features[FEAT_1_EDX] =
1753 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1754 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1755 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1756 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1757 CPUID_DE | CPUID_FP87,
1758 .features[FEAT_1_ECX] =
1759 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1760 CPUID_EXT_SSE3,
1761 .features[FEAT_8000_0001_EDX] =
1762 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1763 .features[FEAT_8000_0001_ECX] =
1764 CPUID_EXT3_LAHF_LM,
1765 .xlevel = 0x80000008,
1766 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1767 },
1768 {
1769 .name = "Nehalem",
1770 .level = 11,
1771 .vendor = CPUID_VENDOR_INTEL,
1772 .family = 6,
1773 .model = 26,
1774 .stepping = 3,
1775 .features[FEAT_1_EDX] =
1776 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1777 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1778 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1779 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1780 CPUID_DE | CPUID_FP87,
1781 .features[FEAT_1_ECX] =
1782 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1783 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1784 .features[FEAT_8000_0001_EDX] =
1785 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1786 .features[FEAT_8000_0001_ECX] =
1787 CPUID_EXT3_LAHF_LM,
1788 .xlevel = 0x80000008,
1789 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1790 },
1791 {
1792 .name = "Nehalem-IBRS",
1793 .level = 11,
1794 .vendor = CPUID_VENDOR_INTEL,
1795 .family = 6,
1796 .model = 26,
1797 .stepping = 3,
1798 .features[FEAT_1_EDX] =
1799 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1800 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1801 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1802 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1803 CPUID_DE | CPUID_FP87,
1804 .features[FEAT_1_ECX] =
1805 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1806 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1807 .features[FEAT_7_0_EDX] =
1808 CPUID_7_0_EDX_SPEC_CTRL,
1809 .features[FEAT_8000_0001_EDX] =
1810 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1811 .features[FEAT_8000_0001_ECX] =
1812 CPUID_EXT3_LAHF_LM,
1813 .xlevel = 0x80000008,
1814 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1815 },
1816 {
1817 .name = "Westmere",
1818 .level = 11,
1819 .vendor = CPUID_VENDOR_INTEL,
1820 .family = 6,
1821 .model = 44,
1822 .stepping = 1,
1823 .features[FEAT_1_EDX] =
1824 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1825 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1826 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1827 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1828 CPUID_DE | CPUID_FP87,
1829 .features[FEAT_1_ECX] =
1830 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1831 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1832 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1833 .features[FEAT_8000_0001_EDX] =
1834 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1835 .features[FEAT_8000_0001_ECX] =
1836 CPUID_EXT3_LAHF_LM,
1837 .features[FEAT_6_EAX] =
1838 CPUID_6_EAX_ARAT,
1839 .xlevel = 0x80000008,
1840 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1841 },
1842 {
1843 .name = "Westmere-IBRS",
1844 .level = 11,
1845 .vendor = CPUID_VENDOR_INTEL,
1846 .family = 6,
1847 .model = 44,
1848 .stepping = 1,
1849 .features[FEAT_1_EDX] =
1850 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1851 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1852 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1853 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1854 CPUID_DE | CPUID_FP87,
1855 .features[FEAT_1_ECX] =
1856 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1857 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1858 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1859 .features[FEAT_8000_0001_EDX] =
1860 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1861 .features[FEAT_8000_0001_ECX] =
1862 CPUID_EXT3_LAHF_LM,
1863 .features[FEAT_7_0_EDX] =
1864 CPUID_7_0_EDX_SPEC_CTRL,
1865 .features[FEAT_6_EAX] =
1866 CPUID_6_EAX_ARAT,
1867 .xlevel = 0x80000008,
1868 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1869 },
    {
        /* Sandy Bridge generation (family 6, model 42): first model here to
         * expose AVX/XSAVE to the guest; no CPUID leaf 7 features. */
        .name = "SandyBridge",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 42,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E312xx (Sandy Bridge)",
    },
    {
        /* Identical to SandyBridge above, plus FEAT_7_0_EDX SPEC_CTRL (IBRS)
         * for Spectre mitigation; kept as a separate model so existing
         * "SandyBridge" guests keep an unchanged CPUID. */
        .name = "SandyBridge-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 42,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
    },
    {
        /* Ivy Bridge (family 6, model 58): Sandy Bridge plus F16C/RDRAND in
         * leaf 1 ECX and the first leaf-7 EBX bits (FSGSBASE/SMEP/ERMS). */
        .name = "IvyBridge",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 58,
        .stepping = 9,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_ERMS,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
    },
    {
        /* IvyBridge plus FEAT_7_0_EDX SPEC_CTRL (IBRS); otherwise identical
         * to the IvyBridge entry above. */
        .name = "IvyBridge-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 58,
        .stepping = 9,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_ERMS,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
    },
    {
        /* Haswell (family 6, model 60) without TSX: same as the Haswell
         * entry below except FEAT_7_0_EBX omits HLE and RTM (TSX), matching
         * parts/microcode where TSX is fused off or disabled. */
        .name = "Haswell-noTSX",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, no TSX)",
    },
    {
        /* Haswell-noTSX plus FEAT_7_0_EDX SPEC_CTRL (IBRS); otherwise
         * identical to the Haswell-noTSX entry above. */
        .name = "Haswell-noTSX-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
    },
    {
        /* Haswell (family 6, model 60, stepping 4) with TSX: FEAT_7_0_EBX
         * includes HLE and RTM, unlike the Haswell-noTSX entry above. */
        .name = "Haswell",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 4,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell)",
    },
    {
        /* Haswell plus FEAT_7_0_EDX SPEC_CTRL (IBRS); otherwise identical to
         * the Haswell entry above. */
        .name = "Haswell-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 4,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, IBRS)",
    },
    {
        /* Broadwell (family 6, model 61) without TSX: same as the Broadwell
         * entry below except FEAT_7_0_EBX omits HLE and RTM. Adds
         * 3DNOWPREFETCH and RDSEED/ADX/SMAP over the Haswell models. */
        .name = "Broadwell-noTSX",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, no TSX)",
    },
    {
        /* Broadwell-noTSX plus FEAT_7_0_EDX SPEC_CTRL (IBRS); otherwise
         * identical to the Broadwell-noTSX entry above. */
        .name = "Broadwell-noTSX-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
    },
    {
        /* Broadwell (family 6, model 61) with TSX: FEAT_7_0_EBX includes
         * HLE and RTM, unlike the Broadwell-noTSX entry above. */
        .name = "Broadwell",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell)",
    },
    {
        /* Broadwell plus FEAT_7_0_EDX SPEC_CTRL (IBRS); otherwise identical
         * to the Broadwell entry above. */
        .name = "Broadwell-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, IBRS)",
    },
    {
        /* Skylake client (family 6, model 94): Broadwell feature set plus
         * the XSAVEC/XGETBV1 XSAVE extensions. */
        .name = "Skylake-Client",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 94,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Skylake)",
    },
    {
        /* Skylake-Client plus FEAT_7_0_EDX SPEC_CTRL (IBRS); otherwise
         * identical to the Skylake-Client entry above. */
        .name = "Skylake-Client-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 94,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Skylake, IBRS)",
    },
    {
        /* Skylake server (family 6, model 85): the client set plus 1GB pages
         * (PDPE1GB), CLWB/CLFLUSHOPT, AVX-512 (F/DQ/BW/CD/VL) and PKU. */
        .name = "Skylake-Server",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 85,
        .stepping = 4,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_PKU,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Skylake)",
    },
    {
        /* Skylake-Server plus FEAT_7_0_EDX SPEC_CTRL (IBRS).
         * NOTE(review): unlike Skylake-Server above, FEAT_7_0_EBX here does
         * NOT include CPUID_7_0_EBX_CLFLUSHOPT — presumably this mismatch is
         * preserved deliberately so that existing guests/migration streams
         * keep a stable CPUID; confirm against upstream history before
         * "fixing" it. */
        .name = "Skylake-Server-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 85,
        .stepping = 4,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_PKU,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Skylake, IBRS)",
    },
    {
        /* Cascade Lake server (family 6, model 85, stepping 6): the
         * Skylake-Server set plus AVX512VNNI and hardware Spectre
         * mitigations (SPEC_CTRL + SSBD) in FEAT_7_0_EDX. */
        .name = "Cascadelake-Server",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 85,
        .stepping = 6,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_PKU |
            CPUID_7_0_ECX_AVX512VNNI,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Cascadelake)",
    },
    {
        /* Ice Lake client (family 6, model 126): adds WBNOINVD
         * (leaf 0x80000008 EBX) and the VBMI/VBMI2/GFNI/VAES/VPCLMULQDQ/
         * AVX512VNNI/BITALG/VPOPCNTDQ group in leaf 7 ECX; no PDPE1GB and
         * no AVX-512 foundation bits (client part). */
        .name = "Icelake-Client",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 126,
        .stepping = 0,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_8000_0008_EBX] =
            CPUID_8000_0008_EBX_WBNOINVD,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
            CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
            CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
            CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
            CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
        /* NOTE(review): the XSAVES remark below references Skylake; it
         * appears inherited verbatim from the Skylake entries. */
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Icelake)",
    },
    {
        /* Ice Lake server (family 6, model 134): Icelake-Client plus
         * PDPE1GB, CLWB/CLFLUSHOPT, the AVX-512 EBX bits (F/DQ/BW/CD/VL),
         * and LA57 (5-level paging) in leaf 7 ECX. */
        .name = "Icelake-Server",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 134,
        .stepping = 0,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_8000_0008_EBX] =
            CPUID_8000_0008_EBX_WBNOINVD,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
            CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
            CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
            CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
            CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
        /* NOTE(review): the XSAVES remark below references Skylake; it
         * appears inherited verbatim from the Skylake entries. */
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Icelake)",
    },
    {
        /* Knights Mill Xeon Phi (family 6, model 133): Phi-specific AVX-512
         * set (PF/ER in EBX, 4VNNIW/4FMAPS in EDX); note leaf 1 EDX also
         * carries CPUID_SS, and leaf 1 ECX has no PCID, unlike the Core/Xeon
         * entries above. */
        .name = "KnightsMill",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 133,
        .stepping = 0,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
            CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
            CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
            CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
            CPUID_PSE | CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
            CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
            CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
            CPUID_7_0_EBX_AVX512ER,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Phi Processor (Knights Mill)",
    },
2712 {
2713 .name = "Opteron_G1",
2714 .level = 5,
2715 .vendor = CPUID_VENDOR_AMD,
2716 .family = 15,
2717 .model = 6,
2718 .stepping = 1,
2719 .features[FEAT_1_EDX] =
2720 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2721 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2722 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2723 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2724 CPUID_DE | CPUID_FP87,
2725 .features[FEAT_1_ECX] =
2726 CPUID_EXT_SSE3,
2727 .features[FEAT_8000_0001_EDX] =
2728 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2729 .xlevel = 0x80000008,
2730 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2731 },
2732 {
2733 .name = "Opteron_G2",
2734 .level = 5,
2735 .vendor = CPUID_VENDOR_AMD,
2736 .family = 15,
2737 .model = 6,
2738 .stepping = 1,
2739 .features[FEAT_1_EDX] =
2740 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2741 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2742 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2743 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2744 CPUID_DE | CPUID_FP87,
2745 .features[FEAT_1_ECX] =
2746 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2747 .features[FEAT_8000_0001_EDX] =
2748 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2749 .features[FEAT_8000_0001_ECX] =
2750 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2751 .xlevel = 0x80000008,
2752 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2753 },
2754 {
2755 .name = "Opteron_G3",
2756 .level = 5,
2757 .vendor = CPUID_VENDOR_AMD,
2758 .family = 16,
2759 .model = 2,
2760 .stepping = 3,
2761 .features[FEAT_1_EDX] =
2762 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2763 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2764 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2765 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2766 CPUID_DE | CPUID_FP87,
2767 .features[FEAT_1_ECX] =
2768 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2769 CPUID_EXT_SSE3,
2770 .features[FEAT_8000_0001_EDX] =
2771 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
2772 CPUID_EXT2_RDTSCP,
2773 .features[FEAT_8000_0001_ECX] =
2774 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2775 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2776 .xlevel = 0x80000008,
2777 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2778 },
2779 {
2780 .name = "Opteron_G4",
2781 .level = 0xd,
2782 .vendor = CPUID_VENDOR_AMD,
2783 .family = 21,
2784 .model = 1,
2785 .stepping = 2,
2786 .features[FEAT_1_EDX] =
2787 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2788 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2789 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2790 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2791 CPUID_DE | CPUID_FP87,
2792 .features[FEAT_1_ECX] =
2793 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2794 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2795 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2796 CPUID_EXT_SSE3,
2797 .features[FEAT_8000_0001_EDX] =
2798 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2799 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2800 .features[FEAT_8000_0001_ECX] =
2801 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2802 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2803 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2804 CPUID_EXT3_LAHF_LM,
2805 .features[FEAT_SVM] =
2806 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2807 /* no xsaveopt! */
2808 .xlevel = 0x8000001A,
2809 .model_id = "AMD Opteron 62xx class CPU",
2810 },
2811 {
2812 .name = "Opteron_G5",
2813 .level = 0xd,
2814 .vendor = CPUID_VENDOR_AMD,
2815 .family = 21,
2816 .model = 2,
2817 .stepping = 0,
2818 .features[FEAT_1_EDX] =
2819 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2820 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2821 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2822 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2823 CPUID_DE | CPUID_FP87,
2824 .features[FEAT_1_ECX] =
2825 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2826 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2827 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2828 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2829 .features[FEAT_8000_0001_EDX] =
2830 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2831 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2832 .features[FEAT_8000_0001_ECX] =
2833 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2834 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2835 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2836 CPUID_EXT3_LAHF_LM,
2837 .features[FEAT_SVM] =
2838 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2839 /* no xsaveopt! */
2840 .xlevel = 0x8000001A,
2841 .model_id = "AMD Opteron 63xx class CPU",
2842 },
2843 {
2844 .name = "EPYC",
2845 .level = 0xd,
2846 .vendor = CPUID_VENDOR_AMD,
2847 .family = 23,
2848 .model = 1,
2849 .stepping = 2,
2850 .features[FEAT_1_EDX] =
2851 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2852 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2853 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2854 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2855 CPUID_VME | CPUID_FP87,
2856 .features[FEAT_1_ECX] =
2857 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2858 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2859 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2860 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2861 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2862 .features[FEAT_8000_0001_EDX] =
2863 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2864 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2865 CPUID_EXT2_SYSCALL,
2866 .features[FEAT_8000_0001_ECX] =
2867 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2868 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2869 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2870 CPUID_EXT3_TOPOEXT,
2871 .features[FEAT_7_0_EBX] =
2872 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2873 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2874 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2875 CPUID_7_0_EBX_SHA_NI,
2876 /* Missing: XSAVES (not supported by some Linux versions,
2877 * including v4.1 to v4.12).
2878 * KVM doesn't yet expose any XSAVES state save component.
2879 */
2880 .features[FEAT_XSAVE] =
2881 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2882 CPUID_XSAVE_XGETBV1,
2883 .features[FEAT_6_EAX] =
2884 CPUID_6_EAX_ARAT,
2885 .features[FEAT_SVM] =
2886 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2887 .xlevel = 0x8000001E,
2888 .model_id = "AMD EPYC Processor",
2889 .cache_info = &epyc_cache_info,
2890 },
2891 {
2892 .name = "EPYC-IBPB",
2893 .level = 0xd,
2894 .vendor = CPUID_VENDOR_AMD,
2895 .family = 23,
2896 .model = 1,
2897 .stepping = 2,
2898 .features[FEAT_1_EDX] =
2899 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2900 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2901 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2902 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2903 CPUID_VME | CPUID_FP87,
2904 .features[FEAT_1_ECX] =
2905 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2906 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2907 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2908 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2909 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2910 .features[FEAT_8000_0001_EDX] =
2911 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2912 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2913 CPUID_EXT2_SYSCALL,
2914 .features[FEAT_8000_0001_ECX] =
2915 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2916 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2917 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2918 CPUID_EXT3_TOPOEXT,
2919 .features[FEAT_8000_0008_EBX] =
2920 CPUID_8000_0008_EBX_IBPB,
2921 .features[FEAT_7_0_EBX] =
2922 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2923 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2924 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2925 CPUID_7_0_EBX_SHA_NI,
2926 /* Missing: XSAVES (not supported by some Linux versions,
2927 * including v4.1 to v4.12).
2928 * KVM doesn't yet expose any XSAVES state save component.
2929 */
2930 .features[FEAT_XSAVE] =
2931 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2932 CPUID_XSAVE_XGETBV1,
2933 .features[FEAT_6_EAX] =
2934 CPUID_6_EAX_ARAT,
2935 .features[FEAT_SVM] =
2936 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2937 .xlevel = 0x8000001E,
2938 .model_id = "AMD EPYC Processor (with IBPB)",
2939 .cache_info = &epyc_cache_info,
2940 },
2941 {
2942 .name = "Dhyana",
2943 .level = 0xd,
2944 .vendor = CPUID_VENDOR_HYGON,
2945 .family = 24,
2946 .model = 0,
2947 .stepping = 1,
2948 .features[FEAT_1_EDX] =
2949 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2950 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2951 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2952 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2953 CPUID_VME | CPUID_FP87,
2954 .features[FEAT_1_ECX] =
2955 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2956 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
2957 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2958 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2959 CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
2960 .features[FEAT_8000_0001_EDX] =
2961 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2962 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2963 CPUID_EXT2_SYSCALL,
2964 .features[FEAT_8000_0001_ECX] =
2965 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2966 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2967 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2968 CPUID_EXT3_TOPOEXT,
2969 .features[FEAT_8000_0008_EBX] =
2970 CPUID_8000_0008_EBX_IBPB,
2971 .features[FEAT_7_0_EBX] =
2972 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2973 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2974 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
2975 /*
2976 * Missing: XSAVES (not supported by some Linux versions,
2977 * including v4.1 to v4.12).
2978 * KVM doesn't yet expose any XSAVES state save component.
2979 */
2980 .features[FEAT_XSAVE] =
2981 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2982 CPUID_XSAVE_XGETBV1,
2983 .features[FEAT_6_EAX] =
2984 CPUID_6_EAX_ARAT,
2985 .features[FEAT_SVM] =
2986 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2987 .xlevel = 0x8000001E,
2988 .model_id = "Hygon Dhyana Processor",
2989 .cache_info = &epyc_cache_info,
2990 },
2991};
2992
/* A (property name, value) pair used for accelerator-specific CPU
 * property defaults.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
2996
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 * NULL-terminated; see x86_cpu_change_kvm_default() for runtime overrides.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    /* Features forced off by default under KVM: */
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
3013
/* TCG-specific defaults that override all CPU models when using TCG.
 * NULL-terminated, same format as kvm_default_props.
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};
3020
3021
3022void x86_cpu_change_kvm_default(const char *prop, const char *value)
3023{
3024 PropValue *pv;
3025 for (pv = kvm_default_props; pv->prop; pv++) {
3026 if (!strcmp(pv->prop, prop)) {
3027 pv->value = value;
3028 break;
3029 }
3030 }
3031
3032 /* It is valid to call this function only for properties that
3033 * are already present in the kvm_default_props table.
3034 */
3035 assert(pv->prop);
3036}
3037
3038static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3039 bool migratable_only);
3040
/*
 * Query whether the host KVM supports LMCE (Local Machine Check
 * Exception).  Always returns false when QEMU is built without KVM,
 * since the MCE capability cannot be queried then.
 */
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    /* On ioctl failure, conservatively report no LMCE support. */
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}
3053
3054#define CPUID_MODEL_ID_SZ 48
3055
/**
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string
 * automatically.  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    /* CPUID leaves 0x80000002..0x80000004 each yield 16 bytes of the
     * model string in EAX/EBX/ECX/EDX order.
     */
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
3079
/* QOM properties specific to the "max" CPU model. */
static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
3085
/* Class initializer for the "max" CPU model. */
static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* Sorted after "host" (ordering 8) in CPU model listings. */
    xcc->ordering = 9;

    xcc->model_description =
        "Enables all features supported by the accelerator in the current host";

    dc->props = max_x86_cpu_properties;
}
3098
3099static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
3100
/*
 * Instance initializer for the "max" CPU model: mirror the host CPU's
 * identification when a host-CPUID-capable accelerator (KVM/HVF) is in
 * use, otherwise fall back to a fixed TCG identity.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        /* Leaf 0: vendor string lives in EBX/EDX/ECX, in that order. */
        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        host_vendor_fms(vendor, &family, &model, &stepping);

        cpu_x86_fill_model_id(model_id);

        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        /* accel_uses_host_cpuid() means KVM or HVF; pick the right
         * backend to query the minimum CPUID levels from.
         */
        if (kvm_enabled()) {
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG: report a fixed synthetic identity. */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
3166
/* QOM type registration for the "max" CPU model. */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
3173
3174#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
3175static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
3176{
3177 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3178
3179 xcc->host_cpuid_required = true;
3180 xcc->ordering = 8;
3181
3182#if defined(CONFIG_KVM)
3183 xcc->model_description =
3184 "KVM processor with all supported host features ";
3185#elif defined(CONFIG_HVF)
3186 xcc->model_description =
3187 "HVF processor with all supported host features ";
3188#endif
3189}
3190
/* QOM type registration for the "host" CPU model; inherits from "max". */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
3196
3197#endif
3198
/*
 * Build a human-readable location string for a feature word, e.g.
 * "CPUID.07H:EBX" or "MSR(10AH)".  The caller must g_free() the result.
 *
 * NOTE(review): the @bit parameter is currently unused; callers pass the
 * bit number but it does not affect the description.
 */
static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
{
    assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);

    switch (f->type) {
    case CPUID_FEATURE_WORD:
        {
            const char *reg = get_register_name_32(f->cpuid.reg);
            assert(reg);
            return g_strdup_printf("CPUID.%02XH:%s",
                                   f->cpuid.eax, reg);
        }
    case MSR_FEATURE_WORD:
        return g_strdup_printf("MSR(%02XH)",
                               f->msr.index);
    }

    /* Unreachable: the assert above covers all enum values. */
    return NULL;
}
3218
3219static void report_unavailable_features(FeatureWord w, uint32_t mask)
3220{
3221 FeatureWordInfo *f = &feature_word_info[w];
3222 int i;
3223 char *feat_word_str;
3224
3225 for (i = 0; i < 32; ++i) {
3226 if ((1UL << i) & mask) {
3227 feat_word_str = feature_word_description(f, i);
3228 warn_report("%s doesn't support requested feature: %s%s%s [bit %d]",
3229 accel_uses_host_cpuid() ? "host" : "TCG",
3230 feat_word_str,
3231 f->feat_names[i] ? "." : "",
3232 f->feat_names[i] ? f->feat_names[i] : "", i);
3233 g_free(feat_word_str);
3234 }
3235 }
3236}
3237
3238static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
3239 const char *name, void *opaque,
3240 Error **errp)
3241{
3242 X86CPU *cpu = X86_CPU(obj);
3243 CPUX86State *env = &cpu->env;
3244 int64_t value;
3245
3246 value = (env->cpuid_version >> 8) & 0xf;
3247 if (value == 0xf) {
3248 value += (env->cpuid_version >> 20) & 0xff;
3249 }
3250 visit_type_int(v, name, &value, errp);
3251}
3252
3253static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
3254 const char *name, void *opaque,
3255 Error **errp)
3256{
3257 X86CPU *cpu = X86_CPU(obj);
3258 CPUX86State *env = &cpu->env;
3259 const int64_t min = 0;
3260 const int64_t max = 0xff + 0xf;
3261 Error *local_err = NULL;
3262 int64_t value;
3263
3264 visit_type_int(v, name, &value, &local_err);
3265 if (local_err) {
3266 error_propagate(errp, local_err);
3267 return;
3268 }
3269 if (value < min || value > max) {
3270 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3271 name ? name : "null", value, min, max);
3272 return;
3273 }
3274
3275 env->cpuid_version &= ~0xff00f00;
3276 if (value > 0x0f) {
3277 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
3278 } else {
3279 env->cpuid_version |= value << 8;
3280 }
3281}
3282
3283static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
3284 const char *name, void *opaque,
3285 Error **errp)
3286{
3287 X86CPU *cpu = X86_CPU(obj);
3288 CPUX86State *env = &cpu->env;
3289 int64_t value;
3290
3291 value = (env->cpuid_version >> 4) & 0xf;
3292 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
3293 visit_type_int(v, name, &value, errp);
3294}
3295
3296static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
3297 const char *name, void *opaque,
3298 Error **errp)
3299{
3300 X86CPU *cpu = X86_CPU(obj);
3301 CPUX86State *env = &cpu->env;
3302 const int64_t min = 0;
3303 const int64_t max = 0xff;
3304 Error *local_err = NULL;
3305 int64_t value;
3306
3307 visit_type_int(v, name, &value, &local_err);
3308 if (local_err) {
3309 error_propagate(errp, local_err);
3310 return;
3311 }
3312 if (value < min || value > max) {
3313 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3314 name ? name : "null", value, min, max);
3315 return;
3316 }
3317
3318 env->cpuid_version &= ~0xf00f0;
3319 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
3320}
3321
3322static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
3323 const char *name, void *opaque,
3324 Error **errp)
3325{
3326 X86CPU *cpu = X86_CPU(obj);
3327 CPUX86State *env = &cpu->env;
3328 int64_t value;
3329
3330 value = env->cpuid_version & 0xf;
3331 visit_type_int(v, name, &value, errp);
3332}
3333
3334static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
3335 const char *name, void *opaque,
3336 Error **errp)
3337{
3338 X86CPU *cpu = X86_CPU(obj);
3339 CPUX86State *env = &cpu->env;
3340 const int64_t min = 0;
3341 const int64_t max = 0xf;
3342 Error *local_err = NULL;
3343 int64_t value;
3344
3345 visit_type_int(v, name, &value, &local_err);
3346 if (local_err) {
3347 error_propagate(errp, local_err);
3348 return;
3349 }
3350 if (value < min || value > max) {
3351 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3352 name ? name : "null", value, min, max);
3353 return;
3354 }
3355
3356 env->cpuid_version &= ~0xf;
3357 env->cpuid_version |= value & 0xf;
3358}
3359
3360static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
3361{
3362 X86CPU *cpu = X86_CPU(obj);
3363 CPUX86State *env = &cpu->env;
3364 char *value;
3365
3366 value = g_malloc(CPUID_VENDOR_SZ + 1);
3367 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
3368 env->cpuid_vendor3);
3369 return value;
3370}
3371
3372static void x86_cpuid_set_vendor(Object *obj, const char *value,
3373 Error **errp)
3374{
3375 X86CPU *cpu = X86_CPU(obj);
3376 CPUX86State *env = &cpu->env;
3377 int i;
3378
3379 if (strlen(value) != CPUID_VENDOR_SZ) {
3380 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
3381 return;
3382 }
3383
3384 env->cpuid_vendor1 = 0;
3385 env->cpuid_vendor2 = 0;
3386 env->cpuid_vendor3 = 0;
3387 for (i = 0; i < 4; i++) {
3388 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3389 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3390 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3391 }
3392}
3393
3394static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3395{
3396 X86CPU *cpu = X86_CPU(obj);
3397 CPUX86State *env = &cpu->env;
3398 char *value;
3399 int i;
3400
3401 value = g_malloc(48 + 1);
3402 for (i = 0; i < 48; i++) {
3403 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3404 }
3405 value[48] = '\0';
3406 return value;
3407}
3408
3409static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3410 Error **errp)
3411{
3412 X86CPU *cpu = X86_CPU(obj);
3413 CPUX86State *env = &cpu->env;
3414 int c, len, i;
3415
3416 if (model_id == NULL) {
3417 model_id = "";
3418 }
3419 len = strlen(model_id);
3420 memset(env->cpuid_model, 0, 48);
3421 for (i = 0; i < 48; i++) {
3422 if (i >= len) {
3423 c = '\0';
3424 } else {
3425 c = (uint8_t)model_id[i];
3426 }
3427 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3428 }
3429}
3430
3431static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3432 void *opaque, Error **errp)
3433{
3434 X86CPU *cpu = X86_CPU(obj);
3435 int64_t value;
3436
3437 value = cpu->env.tsc_khz * 1000;
3438 visit_type_int(v, name, &value, errp);
3439}
3440
3441static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3442 void *opaque, Error **errp)
3443{
3444 X86CPU *cpu = X86_CPU(obj);
3445 const int64_t min = 0;
3446 const int64_t max = INT64_MAX;
3447 Error *local_err = NULL;
3448 int64_t value;
3449
3450 visit_type_int(v, name, &value, &local_err);
3451 if (local_err) {
3452 error_propagate(errp, local_err);
3453 return;
3454 }
3455 if (value < min || value > max) {
3456 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3457 name ? name : "null", value, min, max);
3458 return;
3459 }
3460
3461 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
3462}
3463
/* Generic getter for "feature-words" and "filtered-features" properties.
 * @opaque points at the FeatureWordArray to report (one uint32_t per
 * feature word).  The result list and its elements are allocated on the
 * stack; visit_type_* serializes them before this function returns.
 */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /*
         * We didn't have MSR features when "feature-words" was
         * introduced. Therefore skipped other type entries.
         */
        if (wi->type != CPUID_FEATURE_WORD) {
            continue;
        }
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid.eax;
        qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid.ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
3499
3500static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3501 void *opaque, Error **errp)
3502{
3503 X86CPU *cpu = X86_CPU(obj);
3504 int64_t value = cpu->hyperv_spinlock_attempts;
3505
3506 visit_type_int(v, name, &value, errp);
3507}
3508
3509static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3510 void *opaque, Error **errp)
3511{
3512 const int64_t min = 0xFFF;
3513 const int64_t max = UINT_MAX;
3514 X86CPU *cpu = X86_CPU(obj);
3515 Error *err = NULL;
3516 int64_t value;
3517
3518 visit_type_int(v, name, &value, &err);
3519 if (err) {
3520 error_propagate(errp, err);
3521 return;
3522 }
3523
3524 if (value < min || value > max) {
3525 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
3526 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
3527 object_get_typename(obj), name ? name : "null",
3528 value, min, max);
3529 return;
3530 }
3531 cpu->hyperv_spinlock_attempts = value;
3532}
3533
/* PropertyInfo binding the hv-spinlocks getter/setter pair above. */
static const PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
3539
/* Convert all '_' in a feature string option name to '-', in place, so
 * feature names conform to the QOM property naming rule, which uses '-'
 * instead of '_'.
 */
static inline void feat2prop(char *s)
{
    for (; *s; s++) {
        if (*s == '_') {
            *s = '-';
        }
    }
}
3549
/* Return the feature property name for a feature flag bit, or NULL if
 * the bit has no name.
 */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        /* HI word holds components 32..63. */
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            /* Redirect to the first bit of the feature that enables
             * this save component.
             */
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}
3570
/* Compatibility hack to maintain legacy +-feat semantics,
 * where +-feat overwrites any feature set by
 * feat=on|off even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;
3577
/* GCompareFunc for the feature-name lists; g_strcmp0 is NULL-safe. */
static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}
3582
/* Parse "+feature,-feature,feature=foo" CPU feature string and register
 * the results as global properties on @typename.  Only the first call
 * takes effect (guarded by cpu_globals_initialized); later calls are
 * silent no-ops.
 *
 * NOTE(review): strtok() mutates @features and keeps static state, so
 * this function is not reentrant — acceptable only because it runs once.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: "+feat"/"-feat" are recorded in the
         * plus_features/minus_features lists and applied later.
         */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "name=value"; a bare name means "on". */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Warn when the same feature appears in both syntaxes. */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" accepts size suffixes and is stored
         * as the "tsc-frequency" property.
         */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}
3672
3673static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3674static int x86_cpu_filter_features(X86CPU *cpu);
3675
3676/* Build a list with the name of all features on a feature word array */
3677static void x86_cpu_list_feature_names(FeatureWordArray features,
3678 strList **feat_names)
3679{
3680 FeatureWord w;
3681 strList **next = feat_names;
3682
3683 for (w = 0; w < FEATURE_WORDS; w++) {
3684 uint32_t filtered = features[w];
3685 int i;
3686 for (i = 0; i < 32; i++) {
3687 if (filtered & (1UL << i)) {
3688 strList *new = g_new0(strList, 1);
3689 new->value = g_strdup(x86_cpu_feature_name(w, i));
3690 *next = new;
3691 next = &new->next;
3692 }
3693 }
3694 }
3695}
3696
/* QOM getter for the "unavailable-features" property: visits the list of
 * feature names that were filtered out of this CPU by the accelerator.
 *
 * NOTE(review): @result does not appear to be freed after visiting —
 * confirm whether the output visitor takes ownership, otherwise this
 * leaks one strList per query.
 */
static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
                                             const char *name, void *opaque,
                                             Error **errp)
{
    X86CPU *xc = X86_CPU(obj);
    strList *result = NULL;

    x86_cpu_list_feature_names(xc->filtered_features, &result);
    visit_type_strList(v, "unavailable-features", &result, errp);
}
3707
3708/* Check for missing features that may prevent the CPU class from
3709 * running using the current machine and accelerator.
3710 */
3711static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3712 strList **missing_feats)
3713{
3714 X86CPU *xc;
3715 Error *err = NULL;
3716 strList **next = missing_feats;
3717
3718 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3719 strList *new = g_new0(strList, 1);
3720 new->value = g_strdup("kvm");
3721 *missing_feats = new;
3722 return;
3723 }
3724
3725 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3726
3727 x86_cpu_expand_features(xc, &err);
3728 if (err) {
3729 /* Errors at x86_cpu_expand_features should never happen,
3730 * but in case it does, just report the model as not
3731 * runnable at all using the "type" property.
3732 */
3733 strList *new = g_new0(strList, 1);
3734 new->value = g_strdup("type");
3735 *next = new;
3736 next = &new->next;
3737 }
3738
3739 x86_cpu_filter_features(xc);
3740
3741 x86_cpu_list_feature_names(xc->filtered_features, next);
3742
3743 object_unref(OBJECT(xc));
3744}
3745
3746/* Print all cpuid feature names in featureset
3747 */
3748static void listflags(GList *features)
3749{
3750 size_t len = 0;
3751 GList *tmp;
3752
3753 for (tmp = features; tmp; tmp = tmp->next) {
3754 const char *name = tmp->data;
3755 if ((len + strlen(name) + 1) >= 75) {
3756 qemu_printf("\n");
3757 len = 0;
3758 }
3759 qemu_printf("%s%s", len == 0 ? " " : " ", name);
3760 len += strlen(name) + 1;
3761 }
3762 qemu_printf("\n");
3763}
3764
3765/* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3766static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3767{
3768 ObjectClass *class_a = (ObjectClass *)a;
3769 ObjectClass *class_b = (ObjectClass *)b;
3770 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3771 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3772 char *name_a, *name_b;
3773 int ret;
3774
3775 if (cc_a->ordering != cc_b->ordering) {
3776 ret = cc_a->ordering - cc_b->ordering;
3777 } else {
3778 name_a = x86_cpu_class_get_model_name(cc_a);
3779 name_b = x86_cpu_class_get_model_name(cc_b);
3780 ret = strcmp(name_a, name_b);
3781 g_free(name_a);
3782 g_free(name_b);
3783 }
3784 return ret;
3785}
3786
3787static GSList *get_sorted_cpu_model_list(void)
3788{
3789 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3790 list = g_slist_sort(list, x86_cpu_list_compare);
3791 return list;
3792}
3793
3794static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3795{
3796 ObjectClass *oc = data;
3797 X86CPUClass *cc = X86_CPU_CLASS(oc);
3798 char *name = x86_cpu_class_get_model_name(cc);
3799 const char *desc = cc->model_description;
3800 if (!desc && cc->cpu_def) {
3801 desc = cc->cpu_def->model_id;
3802 }
3803
3804 qemu_printf("x86 %-20s %-48s\n", name, desc);
3805 g_free(name);
3806}
3807
3808/* list available CPU models and flags */
3809void x86_cpu_list(void)
3810{
3811 int i, j;
3812 GSList *list;
3813 GList *names = NULL;
3814
3815 qemu_printf("Available CPUs:\n");
3816 list = get_sorted_cpu_model_list();
3817 g_slist_foreach(list, x86_cpu_list_entry, NULL);
3818 g_slist_free(list);
3819
3820 names = NULL;
3821 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3822 FeatureWordInfo *fw = &feature_word_info[i];
3823 for (j = 0; j < 32; j++) {
3824 if (fw->feat_names[j]) {
3825 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3826 }
3827 }
3828 }
3829
3830 names = g_list_sort(names, (GCompareFunc)strcmp);
3831
3832 qemu_printf("\nRecognized CPUID flags:\n");
3833 listflags(names);
3834 qemu_printf("\n");
3835 g_list_free(names);
3836}
3837
3838static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3839{
3840 ObjectClass *oc = data;
3841 X86CPUClass *cc = X86_CPU_CLASS(oc);
3842 CpuDefinitionInfoList **cpu_list = user_data;
3843 CpuDefinitionInfoList *entry;
3844 CpuDefinitionInfo *info;
3845
3846 info = g_malloc0(sizeof(*info));
3847 info->name = x86_cpu_class_get_model_name(cc);
3848 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3849 info->has_unavailable_features = true;
3850 info->q_typename = g_strdup(object_class_get_name(oc));
3851 info->migration_safe = cc->migration_safe;
3852 info->has_migration_safe = true;
3853 info->q_static = cc->static_model;
3854
3855 entry = g_malloc0(sizeof(*entry));
3856 entry->value = info;
3857 entry->next = *cpu_list;
3858 *cpu_list = entry;
3859}
3860
3861CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
3862{
3863 CpuDefinitionInfoList *cpu_list = NULL;
3864 GSList *list = get_sorted_cpu_model_list();
3865 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3866 g_slist_free(list);
3867 return cpu_list;
3868}
3869
3870static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3871 bool migratable_only)
3872{
3873 FeatureWordInfo *wi = &feature_word_info[w];
3874 uint32_t r = 0;
3875
3876 if (kvm_enabled()) {
3877 switch (wi->type) {
3878 case CPUID_FEATURE_WORD:
3879 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
3880 wi->cpuid.ecx,
3881 wi->cpuid.reg);
3882 break;
3883 case MSR_FEATURE_WORD:
3884 r = kvm_arch_get_supported_msr_feature(kvm_state,
3885 wi->msr.index);
3886 break;
3887 }
3888 } else if (hvf_enabled()) {
3889 if (wi->type != CPUID_FEATURE_WORD) {
3890 return 0;
3891 }
3892 r = hvf_get_supported_cpuid(wi->cpuid.eax,
3893 wi->cpuid.ecx,
3894 wi->cpuid.reg);
3895 } else if (tcg_enabled()) {
3896 r = wi->tcg_features;
3897 } else {
3898 return ~0;
3899 }
3900 if (migratable_only) {
3901 r &= x86_cpu_get_migratable_flags(w);
3902 }
3903 return r;
3904}
3905
3906static void x86_cpu_report_filtered_features(X86CPU *cpu)
3907{
3908 FeatureWord w;
3909
3910 for (w = 0; w < FEATURE_WORDS; w++) {
3911 report_unavailable_features(w, cpu->filtered_features[w]);
3912 }
3913}
3914
3915static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3916{
3917 PropValue *pv;
3918 for (pv = props; pv->prop; pv++) {
3919 if (!pv->value) {
3920 continue;
3921 }
3922 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3923 &error_abort);
3924 }
3925}
3926
/* Load data from X86CPUDefinition into a X86CPU object
 *
 * Copies level/xlevel, family/model/stepping, model-id and the feature
 * word array from @def into @cpu, then applies accelerator-specific
 * defaults and the vendor string.  Errors from individual property
 * setters accumulate into @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    /* Feature words are copied directly, not via QOM properties */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        /* x2apic is turned off by default when the irqchip is in userspace */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Guests always see the hypervisor bit set in CPUID[1].ECX */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        /* Replace the model's vendor with the host's CPUID.0 vendor string */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
3988
3989#ifndef CONFIG_USER_ONLY
3990/* Return a QDict containing keys for all properties that can be included
3991 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3992 * must be included in the dictionary.
3993 */
3994static QDict *x86_cpu_static_props(void)
3995{
3996 FeatureWord w;
3997 int i;
3998 static const char *props[] = {
3999 "min-level",
4000 "min-xlevel",
4001 "family",
4002 "model",
4003 "stepping",
4004 "model-id",
4005 "vendor",
4006 "lmce",
4007 NULL,
4008 };
4009 static QDict *d;
4010
4011 if (d) {
4012 return d;
4013 }
4014
4015 d = qdict_new();
4016 for (i = 0; props[i]; i++) {
4017 qdict_put_null(d, props[i]);
4018 }
4019
4020 for (w = 0; w < FEATURE_WORDS; w++) {
4021 FeatureWordInfo *fi = &feature_word_info[w];
4022 int bit;
4023 for (bit = 0; bit < 32; bit++) {
4024 if (!fi->feat_names[bit]) {
4025 continue;
4026 }
4027 qdict_put_null(d, fi->feat_names[bit]);
4028 }
4029 }
4030
4031 return d;
4032}
4033
4034/* Add an entry to @props dict, with the value for property. */
4035static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
4036{
4037 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
4038 &error_abort);
4039
4040 qdict_put_obj(props, prop, value);
4041}
4042
4043/* Convert CPU model data from X86CPU object to a property dictionary
4044 * that can recreate exactly the same CPU model.
4045 */
4046static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
4047{
4048 QDict *sprops = x86_cpu_static_props();
4049 const QDictEntry *e;
4050
4051 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
4052 const char *prop = qdict_entry_key(e);
4053 x86_cpu_expand_prop(cpu, props, prop);
4054 }
4055}
4056
4057/* Convert CPU model data from X86CPU object to a property dictionary
4058 * that can recreate exactly the same CPU model, including every
4059 * writeable QOM property.
4060 */
4061static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
4062{
4063 ObjectPropertyIterator iter;
4064 ObjectProperty *prop;
4065
4066 object_property_iter_init(&iter, OBJECT(cpu));
4067 while ((prop = object_property_iter_next(&iter))) {
4068 /* skip read-only or write-only properties */
4069 if (!prop->get || !prop->set) {
4070 continue;
4071 }
4072
4073 /* "hotplugged" is the only property that is configurable
4074 * on the command-line but will be set differently on CPUs
4075 * created using "-cpu ... -smp ..." and by CPUs created
4076 * on the fly by x86_cpu_from_model() for querying. Skip it.
4077 */
4078 if (!strcmp(prop->name, "hotplugged")) {
4079 continue;
4080 }
4081 x86_cpu_expand_prop(cpu, props, prop->name);
4082 }
4083}
4084
4085static void object_apply_props(Object *obj, QDict *props, Error **errp)
4086{
4087 const QDictEntry *prop;
4088 Error *err = NULL;
4089
4090 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
4091 object_property_set_qobject(obj, qdict_entry_value(prop),
4092 qdict_entry_key(prop), &err);
4093 if (err) {
4094 break;
4095 }
4096 }
4097
4098 error_propagate(errp, err);
4099}
4100
/* Create X86CPU object according to model+props specification
 *
 * Returns a new X86CPU (owned by the caller) with @props applied and
 * features expanded, or NULL with @errp set on failure.
 */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        /* Apply caller-supplied QOM property overrides */
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    /* Expand the model's feature set (see x86_cpu_expand_features) */
    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    /* On any error, release the partially constructed CPU and return NULL */
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}
4135
/* QMP: expand a CPU model (name + optional props) into an explicit
 * property dictionary.  STATIC expansion is expressed relative to the
 * "base" model; FULL expansion keeps the original model name and adds
 * every writeable property on top.
 */
CpuModelExpansionInfo *
qmp_query_cpu_model_expansion(CpuModelExpansionType type,
                              CpuModelInfo *model,
                              Error **errp)
{
    X86CPU *xc = NULL;
    Error *err = NULL;
    CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
    QDict *props = NULL;
    const char *base_name;

    /* Instantiate a scratch CPU from the requested model + props */
    xc = x86_cpu_from_model(model->name,
                            model->has_props ?
                            qobject_to(QDict, model->props) :
                            NULL, &err);
    if (err) {
        goto out;
    }

    props = qdict_new();
    ret->model = g_new0(CpuModelInfo, 1);
    ret->model->props = QOBJECT(props);
    ret->model->has_props = true;

    switch (type) {
    case CPU_MODEL_EXPANSION_TYPE_STATIC:
        /* Static expansion will be based on "base" only */
        base_name = "base";
        x86_cpu_to_dict(xc, props);
        break;
    case CPU_MODEL_EXPANSION_TYPE_FULL:
        /* As we don't return every single property, full expansion needs
         * to keep the original model name+props, and add extra
         * properties on top of that.
         */
        base_name = model->name;
        x86_cpu_to_dict_full(xc, props);
        break;
    default:
        error_setg(&err, "Unsupported expansion type");
        goto out;
    }

    /* Merge the static-prop expansion in unconditionally so that FULL
     * expansion also contains every property set by x86_cpu_load_def().
     * For STATIC this repeats the call above — presumably harmless since
     * qdict_put_obj() replaces existing keys (TODO confirm).
     */
    x86_cpu_to_dict(xc, props);

    ret->model->name = g_strdup(base_name);

out:
    object_unref(OBJECT(xc));
    if (err) {
        error_propagate(errp, err);
        qapi_free_CpuModelExpansionInfo(ret);
        ret = NULL;
    }
    return ret;
}
4192#endif /* !CONFIG_USER_ONLY */
4193
4194static gchar *x86_gdb_arch_name(CPUState *cs)
4195{
4196#ifdef TARGET_X86_64
4197 return g_strdup("i386:x86-64");
4198#else
4199 return g_strdup("i386");
4200#endif
4201}
4202
4203static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
4204{
4205 X86CPUDefinition *cpudef = data;
4206 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4207
4208 xcc->cpu_def = cpudef;
4209 xcc->migration_safe = true;
4210}
4211
4212static void x86_register_cpudef_type(X86CPUDefinition *def)
4213{
4214 char *typename = x86_cpu_type_name(def->name);
4215 TypeInfo ti = {
4216 .name = typename,
4217 .parent = TYPE_X86_CPU,
4218 .class_init = x86_cpu_cpudef_class_init,
4219 .class_data = def,
4220 };
4221
4222 /* AMD aliases are handled at runtime based on CPUID vendor, so
4223 * they shouldn't be set on the CPU model table.
4224 */
4225 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
4226 /* catch mistakes instead of silently truncating model_id when too long */
4227 assert(def->model_id && strlen(def->model_id) <= 48);
4228
4229
4230 type_register(&ti);
4231 g_free(typename);
4232}
4233
4234#if !defined(CONFIG_USER_ONLY)
4235
4236void cpu_clear_apic_feature(CPUX86State *env)
4237{
4238 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
4239}
4240
4241#endif /* !CONFIG_USER_ONLY */
4242
/*
 * cpu_x86_cpuid: compute the guest-visible CPUID leaf @index, subleaf
 * @count, for @env, returning the register values in *@eax..*@edx.
 *
 * Leaf numbers above the configured limit of their range (basic,
 * hypervisor 0x4000xxxx, extended 0x8000xxxx, Centaur 0xC000xxxx) are
 * clamped to env->cpuid_level, per the Intel SDM rule for invalid EAX.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t pkg_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Maximum basic leaf + vendor string */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, APIC ID, and main feature flags */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE mirrors CR4.OSXSAVE at read time */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << pkg_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* MONITOR/MWAIT Leaf */
        *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
        *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
        *ecx = cpu->mwait.ecx; /* flags */
        *edx = cpu->mwait.edx; /* mwait substates */
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors CR4.PKE at read time */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = xsave_area_size(env->xcr0);
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset for enabled XSAVE components */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Maximum extended leaf + vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if  (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        /* SVM capability leaf, only populated when SVM is advertised */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        /* AMD cache topology leaf */
        *eax = 0;
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            break;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        /* AMD processor topology (core_id encoded in 8 bits) */
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(cs, cpu,
                                  eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        /* AMD SEV: encryption capability + C-bit position */
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
4686
/* CPUClass::reset()
 *
 * Put the CPU back into its architectural power-up state: zero all
 * reset-sensitive fields of CPUX86State, then re-establish the values
 * the hardware guarantees after RESET/INIT.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Only fields up to end_reset_fields are cleared; the rest survive */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    /* architectural CR0 power-up value */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Reset vector: CS base 0xffff0000 + IP 0xfff0 = 0xfffffff0 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    /* Bit 1 of EFLAGS is architecturally always 1 */
    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
    if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
        env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
    }

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_injected = -1;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Application processors start halted, waiting for INIT/SIPI */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}
4818
4819#ifndef CONFIG_USER_ONLY
4820bool cpu_is_bsp(X86CPU *cpu)
4821{
4822 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4823}
4824
4825/* TODO: remove me, when reset over QOM tree is implemented */
4826static void x86_cpu_machine_reset_cb(void *opaque)
4827{
4828 X86CPU *cpu = opaque;
4829 cpu_reset(CPU(cpu));
4830}
4831#endif
4832
4833static void mce_init(X86CPU *cpu)
4834{
4835 CPUX86State *cenv = &cpu->env;
4836 unsigned int bank;
4837
4838 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4839 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4840 (CPUID_MCE | CPUID_MCA)) {
4841 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4842 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4843 cenv->mcg_ctl = ~(uint64_t)0;
4844 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4845 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4846 }
4847 }
4848}
4849
4850#ifndef CONFIG_USER_ONLY
4851APICCommonClass *apic_get_class(void)
4852{
4853 const char *apic_type = "apic";
4854
4855 /* TODO: in-kernel irqchip for hvf */
4856 if (kvm_apic_in_kernel()) {
4857 apic_type = "kvm-apic";
4858 } else if (xen_enabled()) {
4859 apic_type = "xen-apic";
4860 }
4861
4862 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4863}
4864
/*
 * Create the local APIC for @cpu and attach it as the "lapic" child
 * property.  The APIC variant is chosen by apic_get_class().
 * Note: @errp is currently unused here; fatal QOM failures abort via
 * &error_abort.
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* Drop the reference taken by object_new(); the "lapic" child
     * property now owns the APIC object.
     */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
4882
/*
 * Realize the CPU's local APIC (if one was created) and map the APIC
 * MMIO area.  The MMIO region is shared by all CPUs, so it is mapped
 * only once, guarded by the function-local static flag.
 *
 * NOTE(review): the MMIO mapping below proceeds even if setting
 * "realized" failed; the caller is expected to check @errp — confirm
 * this ordering is intended.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
4905
4906static void x86_cpu_machine_done(Notifier *n, void *unused)
4907{
4908 X86CPU *cpu = container_of(n, X86CPU, machine_done);
4909 MemoryRegion *smram =
4910 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4911
4912 if (smram) {
4913 cpu->smram = g_new(MemoryRegion, 1);
4914 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4915 smram, 0, 1ull << 32);
4916 memory_region_set_enabled(cpu->smram, true);
4917 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
4918 }
4919}
4920#else
/* No local APIC exists in user-mode emulation: nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
4924#endif
4925
/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax < 0x80000008) {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        return 36;
    }

    /* Note: According to AMD doc 25481 rev 2.34 they have a field
     * at 23:16 that can specify a maximum physical address bits for
     * the guest that can override this value; but I've not seen
     * anything with that set.
     */
    host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
    return eax & 0xff;
}
4951
4952static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4953{
4954 if (*min < value) {
4955 *min = value;
4956 }
4957}
4958
4959/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4960static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4961{
4962 CPUX86State *env = &cpu->env;
4963 FeatureWordInfo *fi = &feature_word_info[w];
4964 uint32_t eax = fi->cpuid.eax;
4965 uint32_t region = eax & 0xF0000000;
4966
4967 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
4968 if (!env->features[w]) {
4969 return;
4970 }
4971
4972 switch (region) {
4973 case 0x00000000:
4974 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4975 break;
4976 case 0x80000000:
4977 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4978 break;
4979 case 0xC0000000:
4980 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4981 break;
4982 }
4983}
4984
4985/* Calculate XSAVE components based on the configured CPU feature flags */
4986static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4987{
4988 CPUX86State *env = &cpu->env;
4989 int i;
4990 uint64_t mask;
4991
4992 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4993 return;
4994 }
4995
4996 mask = 0;
4997 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4998 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4999 if (env->features[esa->feature] & esa->bits) {
5000 mask |= (1ULL << i);
5001 }
5002 }
5003
5004 env->features[FEAT_XSAVE_COMP_LO] = mask;
5005 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
5006}
5007
/***** Steps involved in loading and filtering CPUID data
5009 *
5010 * When initializing and realizing a CPU object, the steps
5011 * involved in setting up CPUID data are:
5012 *
5013 * 1) Loading CPU model definition (X86CPUDefinition). This is
5014 * implemented by x86_cpu_load_def() and should be completely
5015 * transparent, as it is done automatically by instance_init.
5016 * No code should need to look at X86CPUDefinition structs
5017 * outside instance_init.
5018 *
5019 * 2) CPU expansion. This is done by realize before CPUID
5020 * filtering, and will make sure host/accelerator data is
5021 * loaded for CPU models that depend on host capabilities
5022 * (e.g. "host"). Done by x86_cpu_expand_features().
5023 *
5024 * 3) CPUID filtering. This initializes extra data related to
5025 * CPUID, and checks if the host supports all capabilities
5026 * required by the CPU. Runnability of a CPU model is
5027 * determined at this step. Done by x86_cpu_filter_features().
5028 *
5029 * Some operations don't require all steps to be performed.
5030 * More precisely:
5031 *
5032 * - CPU instance creation (instance_init) will run only CPU
5033 * model loading. CPU expansion can't run at instance_init-time
5034 * because host/accelerator data may be not available yet.
5035 * - CPU realization will perform both CPU model expansion and CPUID
5036 * filtering, and return an error in case one of them fails.
5037 * - query-cpu-definitions needs to run all 3 steps. It needs
5038 * to run CPUID filtering, as the 'unavailable-features'
5039 * field is set based on the filtering results.
5040 * - The query-cpu-model-expansion QMP command only needs to run
5041 * CPU model loading and CPU expansion. It should not filter
5042 * any CPUID data based on host capabilities.
5043 */
5044
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w] & \
                ~feature_word_info[w].no_autoenable_flags;
        }
    }

    /* Apply legacy "+feature" / "-feature" command-line flags as QOM
     * property writes; the first failure aborts expansion.
     */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features are only meaningful under KVM with kvm=on. */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);

        /* Intel Processor Trace requires CPUID[0x14] */
        if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
             kvm_enabled() && cpu->intel_pt_auto_level) {
            x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
        }

        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }

        /* SEV requires CPUID[0x8000001F] */
        if (sev_enabled()) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
5142
/*
 * Finishes initialization of CPUID data, filters CPU feature
 * words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    /* Drop every requested bit the accelerator can't provide, and
     * record the dropped bits in filtered_features for reporting.
     */
    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    /* Intel PT is special: its CPUID[0x14] capabilities are not
     * configurable, so the host must cover everything we would report.
     */
    if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
        kvm_enabled()) {
        KVMState *s = CPU(cpu)->kvm_state;
        uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
        uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
        uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
        uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
        uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);

        if (!eax_0 ||
           ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
           ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
           ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
           ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
                                           INTEL_PT_ADDR_RANGES_NUM) ||
           ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
                (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
           (ecx_0 & INTEL_PT_IP_LIP)) {
            /*
             * Processor Trace capabilities aren't configurable, so if the
             * host can't emulate the capabilities we report on
             * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
             */
            env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
            cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
            rv = 1;
        }
    }

    return rv;
}
5197
/* Vendor checks: the 12-byte CPUID[0] vendor string is stored as three
 * 32-bit words in cpuid_vendor1/2/3; all three must match.
 */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/*
 * Realize an X86CPU device.
 *
 * Ordering matters here: CPUID features are expanded and filtered first,
 * then phys-bits and cache info are derived from them, the generic CPU
 * core and the APIC are realized, TCG's SMM address space is set up, and
 * finally the vCPU is started and reset.  Errors are reported via @errp.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    /* CPU models like "host" only work with a host-CPUID accelerator. */
    if (xcc->host_cpuid_required) {
        if (!accel_uses_host_cpuid()) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(&local_err, "CPU model '%s' requires KVM", name);
            g_free(name);
            goto out;
        }

        if (enable_cpu_pm) {
            host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
                       &cpu->mwait.ecx, &cpu->mwait.edx);
            env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
        }
    }

    /* mwait extended info: needed for Core compatibility */
    /* We always wake on interrupt even if host does not have the capability */
    cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* With "check"/"enforce", report features the accelerator dropped;
     * "enforce" additionally turns that into a hard error.
     */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       accel_uses_host_cpuid() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
                if (cpu->host_phys_bits_limit &&
                    cpu->phys_bits > cpu->host_phys_bits_limit) {
                    cpu->phys_bits = cpu->host_phys_bits_limit;
                }
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            g_free(name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->cpu_def->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }


    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the checking is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
        warn_report("This family of AMD CPU doesn't support "
                    "hyperthreading(%d)",
                    cs->nr_threads);
        error_printf("Please configure -smp options properly"
                     " or try enabling topoext feature.\n");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
5444
5445static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
5446{
5447 X86CPU *cpu = X86_CPU(dev);
5448 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5449 Error *local_err = NULL;
5450
5451#ifndef CONFIG_USER_ONLY
5452 cpu_remove_sync(CPU(dev));
5453 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
5454#endif
5455
5456 if (cpu->apic_state) {
5457 object_unparent(OBJECT(cpu->apic_state));
5458 cpu->apic_state = NULL;
5459 }
5460
5461 xcc->parent_unrealize(dev, &local_err);
5462 if (local_err != NULL) {
5463 error_propagate(errp, local_err);
5464 return;
5465 }
5466}
5467
/* Opaque state for the per-feature-bit QOM boolean properties. */
typedef struct BitProperty {
    FeatureWord w;    /* feature word this property belongs to */
    uint32_t mask;    /* bit(s) within that word controlled by the property */
} BitProperty;
5472
5473static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5474 void *opaque, Error **errp)
5475{
5476 X86CPU *cpu = X86_CPU(obj);
5477 BitProperty *fp = opaque;
5478 uint32_t f = cpu->env.features[fp->w];
5479 bool value = (f & fp->mask) == fp->mask;
5480 visit_type_bool(v, name, &value, errp);
5481}
5482
5483static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5484 void *opaque, Error **errp)
5485{
5486 DeviceState *dev = DEVICE(obj);
5487 X86CPU *cpu = X86_CPU(obj);
5488 BitProperty *fp = opaque;
5489 Error *local_err = NULL;
5490 bool value;
5491
5492 if (dev->realized) {
5493 qdev_prop_set_after_realize(dev, name, errp);
5494 return;
5495 }
5496
5497 visit_type_bool(v, name, &value, &local_err);
5498 if (local_err) {
5499 error_propagate(errp, local_err);
5500 return;
5501 }
5502
5503 if (value) {
5504 cpu->env.features[fp->w] |= fp->mask;
5505 } else {
5506 cpu->env.features[fp->w] &= ~fp->mask;
5507 }
5508 cpu->env.user_features[fp->w] |= fp->mask;
5509}
5510
5511static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5512 void *opaque)
5513{
5514 BitProperty *prop = opaque;
5515 g_free(prop);
5516}
5517
5518/* Register a boolean property to get/set a single bit in a uint32_t field.
5519 *
5520 * The same property name can be registered multiple times to make it affect
5521 * multiple bits in the same FeatureWord. In that case, the getter will return
5522 * true only if all bits are set.
5523 */
5524static void x86_cpu_register_bit_prop(X86CPU *cpu,
5525 const char *prop_name,
5526 FeatureWord w,
5527 int bitnr)
5528{
5529 BitProperty *fp;
5530 ObjectProperty *op;
5531 uint32_t mask = (1UL << bitnr);
5532
5533 op = object_property_find(OBJECT(cpu), prop_name, NULL);
5534 if (op) {
5535 fp = op->opaque;
5536 assert(fp->w == w);
5537 fp->mask |= mask;
5538 } else {
5539 fp = g_new0(BitProperty, 1);
5540 fp->w = w;
5541 fp->mask = mask;
5542 object_property_add(OBJECT(cpu), prop_name, "bool",
5543 x86_cpu_get_bit_prop,
5544 x86_cpu_set_bit_prop,
5545 x86_cpu_release_bit_prop, fp, &error_abort);
5546 }
5547}
5548
5549static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5550 FeatureWord w,
5551 int bitnr)
5552{
5553 FeatureWordInfo *fi = &feature_word_info[w];
5554 const char *name = fi->feat_names[bitnr];
5555
5556 if (!name) {
5557 return;
5558 }
5559
5560 /* Property names should use "-" instead of "_".
5561 * Old names containing underscores are registered as aliases
5562 * using object_property_add_alias()
5563 */
5564 assert(!strchr(name, '_'));
5565 /* aliases don't use "|" delimiters anymore, they are registered
5566 * manually using object_property_add_alias() */
5567 assert(!strchr(name, '|'));
5568 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5569}
5570
5571static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5572{
5573 X86CPU *cpu = X86_CPU(cs);
5574 CPUX86State *env = &cpu->env;
5575 GuestPanicInformation *panic_info = NULL;
5576
5577 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5578 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5579
5580 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
5581
5582 assert(HV_CRASH_PARAMS >= 5);
5583 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5584 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5585 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5586 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5587 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5588 }
5589
5590 return panic_info;
5591}
5592static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5593 const char *name, void *opaque,
5594 Error **errp)
5595{
5596 CPUState *cs = CPU(obj);
5597 GuestPanicInformation *panic_info;
5598
5599 if (!cs->crash_occurred) {
5600 error_setg(errp, "No crash occured");
5601 return;
5602 }
5603
5604 panic_info = x86_cpu_get_crash_info(cs);
5605 if (panic_info == NULL) {
5606 error_setg(errp, "No crash information");
5607 return;
5608 }
5609
5610 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5611 errp);
5612 qapi_free_GuestPanicInformation(panic_info);
5613}
5614
/*
 * Instance init: register the QOM properties of an X86CPU and load the
 * class's CPU model definition.  No host/accelerator data is consulted
 * here; that happens later, at realize time (x86_cpu_expand_features()).
 */
static void x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cpu_set_cpustate_pointers(cpu);

    /* Scalar CPUID-derived properties. */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL)_;
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);
    /*
     * The "unavailable-features" property has the same semantics as
     * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
     * QMP command: they list the features that would have prevented the
     * CPU from running if the "enforce" flag was set.
     */
    object_property_add(obj, "unavailable-features", "strList",
                        x86_cpu_get_unavailable_features,
                        NULL, NULL, NULL, &error_abort);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named feature bit in every feature word. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Compatibility aliases for historic feature-name spellings. */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
5705
5706static int64_t x86_cpu_get_arch_id(CPUState *cs)
5707{
5708 X86CPU *cpu = X86_CPU(cs);
5709
5710 return cpu->apic_id;
5711}
5712
5713static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5714{
5715 X86CPU *cpu = X86_CPU(cs);
5716
5717 return cpu->env.cr[0] & CR0_PG_MASK;
5718}
5719
5720static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5721{
5722 X86CPU *cpu = X86_CPU(cs);
5723
5724 cpu->env.eip = value;
5725}
5726
5727static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5728{
5729 X86CPU *cpu = X86_CPU(cs);
5730
5731 cpu->env.eip = tb->pc - tb->cs_base;
5732}
5733
/*
 * Return the highest-priority pending event from @interrupt_request that
 * can be taken in the CPU's current state, or 0 if none is deliverable.
 * POLL and SIPI are always deliverable; everything else is gated by the
 * HF2_GIF_MASK bit in hflags2 and per-event mask bits.
 */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    if (env->hflags2 & HF2_GIF_MASK) {
        /* SMI is blocked while already in SMM. */
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            return CPU_INTERRUPT_SMI;
        /* NMI is blocked while an NMI handler is running. */
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        /* Hard interrupts: under virtual-interrupt masking (HF2_VINTR)
         * delivery depends on the host interrupt flag (HF2_HIF);
         * otherwise on EFLAGS.IF and the one-insn interrupt shadow.
         */
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}
5775
5776static bool x86_cpu_has_work(CPUState *cs)
5777{
5778 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
5779}
5780
5781static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5782{
5783 X86CPU *cpu = X86_CPU(cs);
5784 CPUX86State *env = &cpu->env;
5785
5786 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5787 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5788 : bfd_mach_i386_i8086);
5789 info->print_insn = print_insn_i386;
5790
5791 info->cap_arch = CS_ARCH_X86;
5792 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5793 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5794 : CS_MODE_16);
5795 info->cap_insn_unit = 1;
5796 info->cap_insn_split = 8;
5797}
5798
/*
 * Recompute the hflags bits that are derived from other CPU state
 * (segment registers, CR0/CR4, EFER, EFLAGS).  Must be called after
 * that state may have changed behind QEMU's back, e.g. when registers
 * are loaded from an accelerator or from migration.
 */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
/* Bits NOT recomputed below are carried over from the old hflags. */
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    /* CPL is the DPL of the current stack segment. */
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    /* CR0.MP/EM/TS shift as a group onto the matching hflag positions. */
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    /* TF/VM/IOPL occupy the same bit positions in EFLAGS and hflags. */
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        /* 64-bit code segment: operand/stack sizes are fixed. */
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        /* Legacy/compat mode: derive CS/SS default sizes from the D/B bits. */
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            /* Real mode, vm86 or 16-bit code always adds segment bases. */
            hflags |= HF_ADDSEG_MASK;
        } else {
            /* Base addition can be skipped when DS/ES/SS bases are all 0. */
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}
5840
/* qdev properties shared by every X86CPU subclass. */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* For system emulation the board assigns topology IDs; -1 = unset. */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),

    /* Hyper-V enlightenments, all off by default. */
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
                      HYPERV_FEAT_RELAXED, 0),
    DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
                      HYPERV_FEAT_VAPIC, 0),
    DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
                      HYPERV_FEAT_TIME, 0),
    DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
                      HYPERV_FEAT_CRASH, 0),
    DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
                      HYPERV_FEAT_RESET, 0),
    DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
                      HYPERV_FEAT_VPINDEX, 0),
    DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
                      HYPERV_FEAT_RUNTIME, 0),
    DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
                      HYPERV_FEAT_SYNIC, 0),
    DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER, 0),
    DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
                      HYPERV_FEAT_FREQUENCIES, 0),
    DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
                      HYPERV_FEAT_REENLIGHTENMENT, 0),
    DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
                      HYPERV_FEAT_TLBFLUSH, 0),
    DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
                      HYPERV_FEAT_EVMCS, 0),
    DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
                      HYPERV_FEAT_IPI, 0),
    DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),

    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
                     true),
    /*
     * legacy-cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
                     true),
    DEFINE_PROP_END_OF_LIST()
};
5937
/*
 * Class init shared by every x86 CPU model: installs the DeviceClass
 * realize/unrealize hooks, the common qdev properties, and the full
 * set of CPUClass callbacks (reset, interrupt handling, gdbstub,
 * memory inspection, disassembly, ...).
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain realize/unrealize so the parent implementations still run. */
    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifndef CONFIG_USER_ONLY
    /* System-emulation-only hooks: memory inspection, crash dumps, VMSD. */
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 66;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 50;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
    cc->tlb_fill = x86_cpu_tlb_fill;
#endif
    cc->disas_set_info = x86_disas_set_info;

    /* CPUs may be created with -device / device_add. */
    dc->user_creatable = true;
}
6000
/* Abstract base QOM type for all x86 CPU models. */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true, /* only concrete model subclasses are instantiable */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
6010
6011
6012/* "base" CPU model, used by query-cpu-model-expansion */
6013static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
6014{
6015 X86CPUClass *xcc = X86_CPU_CLASS(oc);
6016
6017 xcc->static_model = true;
6018 xcc->migration_safe = true;
6019 xcc->model_description = "base CPU model type with no features enabled";
6020 xcc->ordering = 8;
6021}
6022
/* QOM type for the "base" CPU model. */
static const TypeInfo x86_base_cpu_type_info = {
        .name = X86_CPU_TYPE_NAME("base"),
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_base_class_init,
};
6028
6029static void x86_cpu_register_types(void)
6030{
6031 int i;
6032
6033 type_register_static(&x86_cpu_type_info);
6034 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
6035 x86_register_cpudef_type(&builtin_x86_defs[i]);
6036 }
6037 type_register_static(&max_x86_cpu_type_info);
6038 type_register_static(&x86_base_cpu_type_info);
6039#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
6040 type_register_static(&host_x86_cpu_type_info);
6041#endif
6042}
6043
6044type_init(x86_cpu_register_types)