]> git.proxmox.com Git - mirror_qemu.git/blame_incremental - target/i386/cpu.c
target-i386: Reenable RDTSCP support on Opteron_G[345] CPU models CPU models
[mirror_qemu.git] / target / i386 / cpu.c
... / ...
CommitLineData
1/*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "qemu/osdep.h"
21#include "qemu/units.h"
22#include "qemu/cutils.h"
23#include "qemu/bitops.h"
24
25#include "cpu.h"
26#include "exec/exec-all.h"
27#include "sysemu/kvm.h"
28#include "sysemu/hvf.h"
29#include "sysemu/cpus.h"
30#include "kvm_i386.h"
31#include "sev_i386.h"
32
33#include "qemu/error-report.h"
34#include "qemu/option.h"
35#include "qemu/config-file.h"
36#include "qapi/error.h"
37#include "qapi/qapi-visit-misc.h"
38#include "qapi/qapi-visit-run-state.h"
39#include "qapi/qmp/qdict.h"
40#include "qapi/qmp/qerror.h"
41#include "qapi/visitor.h"
42#include "qom/qom-qobject.h"
43#include "sysemu/arch_init.h"
44
45#include "standard-headers/asm-x86/kvm_para.h"
46
47#include "sysemu/sysemu.h"
48#include "hw/qdev-properties.h"
49#include "hw/i386/topology.h"
50#ifndef CONFIG_USER_ONLY
51#include "exec/address-spaces.h"
52#include "hw/hw.h"
53#include "hw/xen/xen.h"
54#include "hw/i386/apic_internal.h"
55#endif
56
57#include "disas/capstone.h"
58
59/* Helpers for building CPUID[2] descriptors: */
60
/* One entry of the CPUID leaf 2 descriptor table below. */
struct CPUID2CacheDescriptorInfo {
    enum CacheType type;   /* data, instruction or unified cache */
    int level;             /* cache level: 1, 2 or 3 */
    int size;              /* total cache size in bytes */
    int line_size;         /* cache line size in bytes */
    int associativity;     /* number of ways */
};
68
/*
 * Known CPUID 2 cache descriptors.
 * From Intel SDM Volume 2A, CPUID instruction.
 * Indexed by the one-byte descriptor value returned in CPUID[2].
 */
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
    [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x0A] = { .level = 1, .type = DATA_CACHE,        .size = 8 * KiB,
               .associativity = 2,  .line_size = 32, },
    [0x0C] = { .level = 1, .type = DATA_CACHE,        .size = 16 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x0D] = { .level = 1, .type = DATA_CACHE,        .size = 16 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x0E] = { .level = 1, .type = DATA_CACHE,        .size = 24 * KiB,
               .associativity = 6,  .line_size = 64, },
    [0x1D] = { .level = 2, .type = UNIFIED_CACHE,     .size = 128 * KiB,
               .associativity = 2,  .line_size = 64, },
    [0x21] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 8,  .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x22, 0x23 are not included
     */
    [0x24] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 16, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x25, 0x20 are not included
     */
    [0x2C] = { .level = 1, .type = DATA_CACHE,        .size = 32 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x41] = { .level = 2, .type = UNIFIED_CACHE,     .size = 128 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x42] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x43] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x44] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 4,  .line_size = 32, },
    [0x45] = { .level = 2, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 4,  .line_size = 32, },
    [0x46] = { .level = 3, .type = UNIFIED_CACHE,     .size = 4 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0x47] = { .level = 3, .type = UNIFIED_CACHE,     .size = 8 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0x48] = { .level = 2, .type = UNIFIED_CACHE,     .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    /* Descriptor 0x49 depends on CPU family/model, so it is not included */
    [0x4A] = { .level = 3, .type = UNIFIED_CACHE,     .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4B] = { .level = 3, .type = UNIFIED_CACHE,     .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4C] = { .level = 3, .type = UNIFIED_CACHE,     .size = 12 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4D] = { .level = 3, .type = UNIFIED_CACHE,     .size = 16 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4E] = { .level = 2, .type = UNIFIED_CACHE,     .size = 6 * MiB,
               .associativity = 24, .line_size = 64, },
    [0x60] = { .level = 1, .type = DATA_CACHE,        .size = 16 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x66] = { .level = 1, .type = DATA_CACHE,        .size = 8 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x67] = { .level = 1, .type = DATA_CACHE,        .size = 16 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x68] = { .level = 1, .type = DATA_CACHE,        .size = 32 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x78] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 4,  .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
     */
    [0x7D] = { .level = 2, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0x7F] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 2,  .line_size = 64, },
    [0x80] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x82] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 8,  .line_size = 32, },
    [0x83] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 8,  .line_size = 32, },
    [0x84] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 8,  .line_size = 32, },
    [0x85] = { .level = 2, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 8,  .line_size = 32, },
    [0x86] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x87] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD0] = { .level = 3, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0xD1] = { .level = 3, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0xD2] = { .level = 3, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0xD6] = { .level = 3, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD7] = { .level = 3, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD8] = { .level = 3, .type = UNIFIED_CACHE,     .size = 4 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xDC] = { .level = 3, .type = UNIFIED_CACHE,     .size = 1.5 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDD] = { .level = 3, .type = UNIFIED_CACHE,     .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDE] = { .level = 3, .type = UNIFIED_CACHE,     .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xE2] = { .level = 3, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE3] = { .level = 3, .type = UNIFIED_CACHE,     .size = 4 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE4] = { .level = 3, .type = UNIFIED_CACHE,     .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xEA] = { .level = 3, .type = UNIFIED_CACHE,     .size = 12 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEB] = { .level = 3, .type = UNIFIED_CACHE,     .size = 18 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEC] = { .level = 3, .type = UNIFIED_CACHE,     .size = 24 * MiB,
               .associativity = 24, .line_size = 64, },
};
193
194/*
195 * "CPUID leaf 2 does not report cache descriptor information,
196 * use CPUID leaf 4 to query cache parameters"
197 */
198#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
199
200/*
201 * Return a CPUID 2 cache descriptor for a given cache.
202 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
203 */
204static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
205{
206 int i;
207
208 assert(cache->size > 0);
209 assert(cache->level > 0);
210 assert(cache->line_size > 0);
211 assert(cache->associativity > 0);
212 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
213 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
214 if (d->level == cache->level && d->type == cache->type &&
215 d->size == cache->size && d->line_size == cache->line_size &&
216 d->associativity == cache->associativity) {
217 return i;
218 }
219 }
220
221 return CACHE_DESCRIPTOR_UNAVAILABLE;
222}
223
/* CPUID Leaf 4 constants: */

/* EAX: */
#define CACHE_TYPE_D  1
#define CACHE_TYPE_I  2
#define CACHE_TYPE_UNIFIED   3

/*
 * Encode the cache level in CPUID[4].EAX[7:5].  The argument is fully
 * parenthesized so expressions such as CACHE_LEVEL(a | b) bind correctly.
 */
#define CACHE_LEVEL(l)        ((l) << 5)

#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
                       ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)
245
246
/*
 * Encode cache info for CPUID[4].
 * @cache: cache geometry/flags to encode
 * @num_apic_ids: number of APIC IDs sharing this cache (encoded minus 1)
 * @num_cores: number of cores on the package (encoded minus 1)
 * Outputs go to *eax..*edx exactly as the guest will see them.
 */
static void encode_cache_cpuid4(CPUCacheInfo *cache,
                                int num_apic_ids, int num_cores,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
{
    /* Size in bytes must be consistent with the geometry fields */
    assert(cache->size == cache->line_size * cache->associativity *
           cache->partitions * cache->sets);

    assert(num_apic_ids > 0);
    /* EAX: type, level, self-init flag, plus the two sharing counts */
    *eax = CACHE_TYPE(cache->type) |
           CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
           ((num_cores - 1) << 26) |
           ((num_apic_ids - 1) << 14);

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    /* EBX: line size, partitions, ways — each field encoded minus one */
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    /* ECX: number of sets minus one */
    *ecx = cache->sets - 1;

    /* EDX: cache behavior flag bits */
    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
279
280/* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
281static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
282{
283 assert(cache->size % 1024 == 0);
284 assert(cache->lines_per_tag > 0);
285 assert(cache->associativity > 0);
286 assert(cache->line_size > 0);
287 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
288 (cache->lines_per_tag << 8) | (cache->line_size);
289}
290
#define ASSOC_FULL 0xFF

/*
 * AMD associativity encoding used on CPUID Leaf 0x80000006.
 * Each use of the argument is parenthesized so expressions (e.g.
 * conditionals) passed as @a evaluate as the caller intended.  Note the
 * argument is still evaluated multiple times, so avoid side effects.
 */
#define AMD_ENC_ASSOC(a) ((a) <=    1 ? (a)  : \
                          (a) ==    2 ? 0x2  : \
                          (a) ==    4 ? 0x4  : \
                          (a) ==    8 ? 0x6  : \
                          (a) ==   16 ? 0x8  : \
                          (a) ==   32 ? 0xA  : \
                          (a) ==   48 ? 0xB  : \
                          (a) ==   64 ? 0xC  : \
                          (a) ==   96 ? 0xD  : \
                          (a) ==  128 ? 0xE  : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
306
307/*
308 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
309 * @l3 can be NULL.
310 */
311static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
312 CPUCacheInfo *l3,
313 uint32_t *ecx, uint32_t *edx)
314{
315 assert(l2->size % 1024 == 0);
316 assert(l2->associativity > 0);
317 assert(l2->lines_per_tag > 0);
318 assert(l2->line_size > 0);
319 *ecx = ((l2->size / 1024) << 16) |
320 (AMD_ENC_ASSOC(l2->associativity) << 12) |
321 (l2->lines_per_tag << 8) | (l2->line_size);
322
323 if (l3) {
324 assert(l3->size % (512 * 1024) == 0);
325 assert(l3->associativity > 0);
326 assert(l3->lines_per_tag > 0);
327 assert(l3->line_size > 0);
328 *edx = ((l3->size / (512 * 1024)) << 18) |
329 (AMD_ENC_ASSOC(l3->associativity) << 12) |
330 (l3->lines_per_tag << 8) | (l3->line_size);
331 } else {
332 *edx = 0;
333 }
334}
335
/*
 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
 * Define the constants to build the cpu topology. Right now, TOPOEXT
 * feature is enabled only on EPYC. So, these constants are based on
 * EPYC supported configurations. We may need to handle the cases if
 * these values change in future.
 */
/* Maximum core complexes in a node */
#define MAX_CCX 2
/* Maximum cores in a core complex */
#define MAX_CORES_IN_CCX 4
/* Maximum cores in a node (MAX_CCX * MAX_CORES_IN_CCX) */
#define MAX_CORES_IN_NODE 8
/* Maximum nodes in a socket */
#define MAX_NODES_PER_SOCKET 4
352
353/*
354 * Figure out the number of nodes required to build this config.
355 * Max cores in a node is 8
356 */
357static int nodes_in_socket(int nr_cores)
358{
359 int nodes;
360
361 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
362
363 /* Hardware does not support config with 3 nodes, return 4 in that case */
364 return (nodes == 3) ? 4 : nodes;
365}
366
367/*
368 * Decide the number of cores in a core complex with the given nr_cores using
369 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
370 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
371 * L3 cache is shared across all cores in a core complex. So, this will also
372 * tell us how many cores are sharing the L3 cache.
373 */
374static int cores_in_core_complex(int nr_cores)
375{
376 int nodes;
377
378 /* Check if we can fit all the cores in one core complex */
379 if (nr_cores <= MAX_CORES_IN_CCX) {
380 return nr_cores;
381 }
382 /* Get the number of nodes required to build this config */
383 nodes = nodes_in_socket(nr_cores);
384
385 /*
386 * Divide the cores accros all the core complexes
387 * Return rounded up value
388 */
389 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
390}
391
/*
 * Encode cache info for CPUID[8000001D] (AMD extended cache topology).
 * Same field layout as Intel's leaf 4 for EBX/ECX/EDX; EAX[25:14] holds
 * the number of logical processors sharing this cache, minus one.
 */
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    uint32_t l3_cores;
    /* Size in bytes must be consistent with the geometry fields */
    assert(cache->size == cache->line_size * cache->associativity *
           cache->partitions * cache->sets);

    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);

    /* L3 is shared among multiple cores */
    if (cache->level == 3) {
        /* All threads of every core in the complex share the L3 */
        l3_cores = cores_in_core_complex(cs->nr_cores);
        *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
    } else {
        /* L1/L2 are private to a core: shared only by its SMT threads */
        *eax |= ((cs->nr_threads - 1) << 14);
    }

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    /* EBX: line size, partitions, ways — each field encoded minus one */
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    /* ECX: number of sets minus one */
    *ecx = cache->sets - 1;

    /* EDX: cache behavior flag bits */
    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
428
/* Data structure to hold the configuration info for a given core index */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology
     * This can be 0,1,2,3 with max 4 cores in a core complex
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};
443
444/*
445 * Build the configuration closely match the EPYC hardware. Using the EPYC
446 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
447 * right now. This could change in future.
448 * nr_cores : Total number of cores in the config
449 * core_id : Core index of the current CPU
450 * topo : Data structure to hold all the config info for this core index
451 */
452static void build_core_topology(int nr_cores, int core_id,
453 struct core_topology *topo)
454{
455 int nodes, cores_in_ccx;
456
457 /* First get the number of nodes required */
458 nodes = nodes_in_socket(nr_cores);
459
460 cores_in_ccx = cores_in_core_complex(nr_cores);
461
462 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
463 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
464 topo->core_id = core_id % cores_in_ccx;
465 topo->num_nodes = nodes;
466}
467
/* Encode topology info for CPUID[8000001E] (AMD extended APIC/topology) */
static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
{
    struct core_topology topo = {0};
    unsigned long nodes;
    int shift;

    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
    /* EAX: extended APIC id of this vCPU */
    *eax = cpu->apic_id;
    /*
     * CPUID_Fn8000001E_EBX
     * 31:16 Reserved
     * 15:8  Threads per core (The number of threads per core is
     *       Threads per core + 1)
     *  7:0  Core id (see bit decoding below)
     *       SMT:
     *           4:3 node id
     *             2 Core complex id
     *           1:0 Core id
     *       Non SMT:
     *           5:4 node id
     *             3 Core complex id
     *           1:0 Core id
     */
    if (cs->nr_threads - 1) {
        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
               (topo.ccx_id << 2) | topo.core_id;
    } else {
        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
    }
    /*
     * CPUID_Fn8000001E_ECX
     * 31:11 Reserved
     * 10:8  Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0  Node id (see bit decoding below)
     *         2  Socket id
     *       1:0  Node id
     */
    if (topo.num_nodes <= 4) {
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
                topo.node_id;
    } else {
        /*
         * Node id fix up. Actual hardware supports up to 4 nodes. But with
         * more than 32 cores, we may end up with more than 4 nodes.
         * Node id is a combination of socket id and node id. Only requirement
         * here is that this number should be unique across the system.
         * Shift the socket id to accommodate more nodes. We don't expect both
         * socket id and node id to be big numbers at the same time. This is
         * not an ideal config but we need to support it. Max nodes we can have
         * is 32 (255/8) with 8 cores per node and 255 max cores. We only need
         * 5 bits for nodes. Find the left most set bit to represent the total
         * number of nodes. find_last_bit returns last set bit(0 based). Left
         * shift(+1) the socket id to represent all the nodes.
         */
        nodes = topo.num_nodes - 1;
        shift = find_last_bit(&nodes, 8);
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
                topo.node_id;
    }
    /* EDX is reserved on this leaf */
    *edx = 0;
}
532
/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};


/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,
    .partitions = 1,
};

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};
642
/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512

/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
/*
 * Generated packets which contain IP payloads have LIP values.
 * Use an unsigned constant: left-shifting (signed) 1 into bit 31 is
 * undefined behavior in C.
 */
#define INTEL_PT_IP_LIP          (1u << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
693
694static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
695 uint32_t vendor2, uint32_t vendor3)
696{
697 int i;
698 for (i = 0; i < 4; i++) {
699 dst[i] = vendor1 >> (8 * i);
700 dst[i + 4] = vendor2 >> (8 * i);
701 dst[i + 8] = vendor3 >> (8 * i);
702 }
703 dst[CPUID_VENDOR_SZ] = '\0';
704}
705
/* CPUID[1].EDX feature sets for the classic built-in CPU models: */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* Feature flags (per CPUID leaf/register) that TCG can emulate: */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES CPUID_SVM_NPT
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
          /* CPUID_7_0_ECX_OSPKE is dynamic */ \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
772
/* How a feature word is enumerated: via a CPUID leaf or via a feature MSR */
typedef enum FeatureWordType {
   CPUID_FEATURE_WORD,
   MSR_FEATURE_WORD,
} FeatureWordType;

/* Static description of one 32-bit feature word (see feature_word_info[]) */
typedef struct FeatureWordInfo {
    FeatureWordType type;
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];   /* one name per bit; NULL = unnamed bit */
    union {
        /* If type==CPUID_FEATURE_WORD */
        struct {
            uint32_t eax;   /* Input EAX for CPUID */
            bool needs_ecx; /* CPUID instruction uses ECX as input */
            uint32_t ecx;   /* Input ECX value for CPUID */
            int reg;        /* output register (R_* constant) */
        } cpuid;
        /* If type==MSR_FEATURE_WORD */
        struct {
            uint32_t index;
            struct {   /*CPUID that enumerate this MSR*/
                FeatureWord cpuid_class;
                uint32_t    cpuid_flag;
            } cpuid_dep;
        } msr;
    };
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
} FeatureWordInfo;
809
810static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
811 [FEAT_1_EDX] = {
812 .type = CPUID_FEATURE_WORD,
813 .feat_names = {
814 "fpu", "vme", "de", "pse",
815 "tsc", "msr", "pae", "mce",
816 "cx8", "apic", NULL, "sep",
817 "mtrr", "pge", "mca", "cmov",
818 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
819 NULL, "ds" /* Intel dts */, "acpi", "mmx",
820 "fxsr", "sse", "sse2", "ss",
821 "ht" /* Intel htt */, "tm", "ia64", "pbe",
822 },
823 .cpuid = {.eax = 1, .reg = R_EDX, },
824 .tcg_features = TCG_FEATURES,
825 },
826 [FEAT_1_ECX] = {
827 .type = CPUID_FEATURE_WORD,
828 .feat_names = {
829 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
830 "ds-cpl", "vmx", "smx", "est",
831 "tm2", "ssse3", "cid", NULL,
832 "fma", "cx16", "xtpr", "pdcm",
833 NULL, "pcid", "dca", "sse4.1",
834 "sse4.2", "x2apic", "movbe", "popcnt",
835 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
836 "avx", "f16c", "rdrand", "hypervisor",
837 },
838 .cpuid = { .eax = 1, .reg = R_ECX, },
839 .tcg_features = TCG_EXT_FEATURES,
840 },
841 /* Feature names that are already defined on feature_name[] but
842 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
843 * names on feat_names below. They are copied automatically
844 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
845 */
846 [FEAT_8000_0001_EDX] = {
847 .type = CPUID_FEATURE_WORD,
848 .feat_names = {
849 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
850 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
851 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
852 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
853 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
854 "nx", NULL, "mmxext", NULL /* mmx */,
855 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
856 NULL, "lm", "3dnowext", "3dnow",
857 },
858 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
859 .tcg_features = TCG_EXT2_FEATURES,
860 },
861 [FEAT_8000_0001_ECX] = {
862 .type = CPUID_FEATURE_WORD,
863 .feat_names = {
864 "lahf-lm", "cmp-legacy", "svm", "extapic",
865 "cr8legacy", "abm", "sse4a", "misalignsse",
866 "3dnowprefetch", "osvw", "ibs", "xop",
867 "skinit", "wdt", NULL, "lwp",
868 "fma4", "tce", NULL, "nodeid-msr",
869 NULL, "tbm", "topoext", "perfctr-core",
870 "perfctr-nb", NULL, NULL, NULL,
871 NULL, NULL, NULL, NULL,
872 },
873 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
874 .tcg_features = TCG_EXT3_FEATURES,
875 /*
876 * TOPOEXT is always allowed but can't be enabled blindly by
877 * "-cpu host", as it requires consistent cache topology info
878 * to be provided so it doesn't confuse guests.
879 */
880 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
881 },
882 [FEAT_C000_0001_EDX] = {
883 .type = CPUID_FEATURE_WORD,
884 .feat_names = {
885 NULL, NULL, "xstore", "xstore-en",
886 NULL, NULL, "xcrypt", "xcrypt-en",
887 "ace2", "ace2-en", "phe", "phe-en",
888 "pmm", "pmm-en", NULL, NULL,
889 NULL, NULL, NULL, NULL,
890 NULL, NULL, NULL, NULL,
891 NULL, NULL, NULL, NULL,
892 NULL, NULL, NULL, NULL,
893 },
894 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
895 .tcg_features = TCG_EXT4_FEATURES,
896 },
897 [FEAT_KVM] = {
898 .type = CPUID_FEATURE_WORD,
899 .feat_names = {
900 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
901 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
902 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
903 NULL, NULL, NULL, NULL,
904 NULL, NULL, NULL, NULL,
905 NULL, NULL, NULL, NULL,
906 "kvmclock-stable-bit", NULL, NULL, NULL,
907 NULL, NULL, NULL, NULL,
908 },
909 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
910 .tcg_features = TCG_KVM_FEATURES,
911 },
912 [FEAT_KVM_HINTS] = {
913 .type = CPUID_FEATURE_WORD,
914 .feat_names = {
915 "kvm-hint-dedicated", NULL, NULL, NULL,
916 NULL, NULL, NULL, NULL,
917 NULL, NULL, NULL, NULL,
918 NULL, NULL, NULL, NULL,
919 NULL, NULL, NULL, NULL,
920 NULL, NULL, NULL, NULL,
921 NULL, NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 },
924 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
925 .tcg_features = TCG_KVM_FEATURES,
926 /*
927 * KVM hints aren't auto-enabled by -cpu host, they need to be
928 * explicitly enabled in the command-line.
929 */
930 .no_autoenable_flags = ~0U,
931 },
932 [FEAT_HYPERV_EAX] = {
933 .type = CPUID_FEATURE_WORD,
934 .feat_names = {
935 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
936 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
937 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
938 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
939 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
940 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
941 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
942 NULL, NULL,
943 NULL, NULL, NULL, NULL,
944 NULL, NULL, NULL, NULL,
945 NULL, NULL, NULL, NULL,
946 NULL, NULL, NULL, NULL,
947 },
948 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
949 },
950 [FEAT_HYPERV_EBX] = {
951 .type = CPUID_FEATURE_WORD,
952 .feat_names = {
953 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
954 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
955 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
956 NULL /* hv_create_port */, NULL /* hv_connect_port */,
957 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
958 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
959 NULL, NULL,
960 NULL, NULL, NULL, NULL,
961 NULL, NULL, NULL, NULL,
962 NULL, NULL, NULL, NULL,
963 NULL, NULL, NULL, NULL,
964 },
965 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
966 },
967 [FEAT_HYPERV_EDX] = {
968 .type = CPUID_FEATURE_WORD,
969 .feat_names = {
970 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
971 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
972 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
973 NULL, NULL,
974 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
975 NULL, NULL, NULL, NULL,
976 NULL, NULL, NULL, NULL,
977 NULL, NULL, NULL, NULL,
978 NULL, NULL, NULL, NULL,
979 NULL, NULL, NULL, NULL,
980 },
981 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
982 },
983 [FEAT_HV_RECOMM_EAX] = {
984 .type = CPUID_FEATURE_WORD,
985 .feat_names = {
986 NULL /* hv_recommend_pv_as_switch */,
987 NULL /* hv_recommend_pv_tlbflush_local */,
988 NULL /* hv_recommend_pv_tlbflush_remote */,
989 NULL /* hv_recommend_msr_apic_access */,
990 NULL /* hv_recommend_msr_reset */,
991 NULL /* hv_recommend_relaxed_timing */,
992 NULL /* hv_recommend_dma_remapping */,
993 NULL /* hv_recommend_int_remapping */,
994 NULL /* hv_recommend_x2apic_msrs */,
995 NULL /* hv_recommend_autoeoi_deprecation */,
996 NULL /* hv_recommend_pv_ipi */,
997 NULL /* hv_recommend_ex_hypercalls */,
998 NULL /* hv_hypervisor_is_nested */,
999 NULL /* hv_recommend_int_mbec */,
1000 NULL /* hv_recommend_evmcs */,
1001 NULL,
1002 NULL, NULL, NULL, NULL,
1003 NULL, NULL, NULL, NULL,
1004 NULL, NULL, NULL, NULL,
1005 NULL, NULL, NULL, NULL,
1006 },
1007 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
1008 },
1009 [FEAT_HV_NESTED_EAX] = {
1010 .type = CPUID_FEATURE_WORD,
1011 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
1012 },
1013 [FEAT_SVM] = {
1014 .type = CPUID_FEATURE_WORD,
1015 .feat_names = {
1016 "npt", "lbrv", "svm-lock", "nrip-save",
1017 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1018 NULL, NULL, "pause-filter", NULL,
1019 "pfthreshold", NULL, NULL, NULL,
1020 NULL, NULL, NULL, NULL,
1021 NULL, NULL, NULL, NULL,
1022 NULL, NULL, NULL, NULL,
1023 NULL, NULL, NULL, NULL,
1024 },
1025 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
1026 .tcg_features = TCG_SVM_FEATURES,
1027 },
1028 [FEAT_7_0_EBX] = {
1029 .type = CPUID_FEATURE_WORD,
1030 .feat_names = {
1031 "fsgsbase", "tsc-adjust", NULL, "bmi1",
1032 "hle", "avx2", NULL, "smep",
1033 "bmi2", "erms", "invpcid", "rtm",
1034 NULL, NULL, "mpx", NULL,
1035 "avx512f", "avx512dq", "rdseed", "adx",
1036 "smap", "avx512ifma", "pcommit", "clflushopt",
1037 "clwb", "intel-pt", "avx512pf", "avx512er",
1038 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1039 },
1040 .cpuid = {
1041 .eax = 7,
1042 .needs_ecx = true, .ecx = 0,
1043 .reg = R_EBX,
1044 },
1045 .tcg_features = TCG_7_0_EBX_FEATURES,
1046 },
1047 [FEAT_7_0_ECX] = {
1048 .type = CPUID_FEATURE_WORD,
1049 .feat_names = {
1050 NULL, "avx512vbmi", "umip", "pku",
1051 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
1052 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1053 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
1054 "la57", NULL, NULL, NULL,
1055 NULL, NULL, "rdpid", NULL,
1056 NULL, "cldemote", NULL, "movdiri",
1057 "movdir64b", NULL, NULL, NULL,
1058 },
1059 .cpuid = {
1060 .eax = 7,
1061 .needs_ecx = true, .ecx = 0,
1062 .reg = R_ECX,
1063 },
1064 .tcg_features = TCG_7_0_ECX_FEATURES,
1065 },
1066 [FEAT_7_0_EDX] = {
1067 .type = CPUID_FEATURE_WORD,
1068 .feat_names = {
1069 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1070 NULL, NULL, NULL, NULL,
1071 NULL, NULL, NULL, NULL,
1072 NULL, NULL, NULL, NULL,
1073 NULL, NULL, "pconfig", NULL,
1074 NULL, NULL, NULL, NULL,
1075 NULL, NULL, "spec-ctrl", "stibp",
1076 NULL, "arch-capabilities", NULL, "ssbd",
1077 },
1078 .cpuid = {
1079 .eax = 7,
1080 .needs_ecx = true, .ecx = 0,
1081 .reg = R_EDX,
1082 },
1083 .tcg_features = TCG_7_0_EDX_FEATURES,
1084 .unmigratable_flags = CPUID_7_0_EDX_ARCH_CAPABILITIES,
1085 },
1086 [FEAT_8000_0007_EDX] = {
1087 .type = CPUID_FEATURE_WORD,
1088 .feat_names = {
1089 NULL, NULL, NULL, NULL,
1090 NULL, NULL, NULL, NULL,
1091 "invtsc", NULL, NULL, NULL,
1092 NULL, NULL, NULL, NULL,
1093 NULL, NULL, NULL, NULL,
1094 NULL, NULL, NULL, NULL,
1095 NULL, NULL, NULL, NULL,
1096 NULL, NULL, NULL, NULL,
1097 },
1098 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1099 .tcg_features = TCG_APM_FEATURES,
1100 .unmigratable_flags = CPUID_APM_INVTSC,
1101 },
1102 [FEAT_8000_0008_EBX] = {
1103 .type = CPUID_FEATURE_WORD,
1104 .feat_names = {
1105 NULL, NULL, NULL, NULL,
1106 NULL, NULL, NULL, NULL,
1107 NULL, "wbnoinvd", NULL, NULL,
1108 "ibpb", NULL, NULL, NULL,
1109 NULL, NULL, NULL, NULL,
1110 NULL, NULL, NULL, NULL,
1111 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1112 NULL, NULL, NULL, NULL,
1113 },
1114 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1115 .tcg_features = 0,
1116 .unmigratable_flags = 0,
1117 },
1118 [FEAT_XSAVE] = {
1119 .type = CPUID_FEATURE_WORD,
1120 .feat_names = {
1121 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1122 NULL, NULL, NULL, NULL,
1123 NULL, NULL, NULL, NULL,
1124 NULL, NULL, NULL, NULL,
1125 NULL, NULL, NULL, NULL,
1126 NULL, NULL, NULL, NULL,
1127 NULL, NULL, NULL, NULL,
1128 NULL, NULL, NULL, NULL,
1129 },
1130 .cpuid = {
1131 .eax = 0xd,
1132 .needs_ecx = true, .ecx = 1,
1133 .reg = R_EAX,
1134 },
1135 .tcg_features = TCG_XSAVE_FEATURES,
1136 },
1137 [FEAT_6_EAX] = {
1138 .type = CPUID_FEATURE_WORD,
1139 .feat_names = {
1140 NULL, NULL, "arat", NULL,
1141 NULL, NULL, NULL, NULL,
1142 NULL, NULL, NULL, NULL,
1143 NULL, NULL, NULL, NULL,
1144 NULL, NULL, NULL, NULL,
1145 NULL, NULL, NULL, NULL,
1146 NULL, NULL, NULL, NULL,
1147 NULL, NULL, NULL, NULL,
1148 },
1149 .cpuid = { .eax = 6, .reg = R_EAX, },
1150 .tcg_features = TCG_6_EAX_FEATURES,
1151 },
1152 [FEAT_XSAVE_COMP_LO] = {
1153 .type = CPUID_FEATURE_WORD,
1154 .cpuid = {
1155 .eax = 0xD,
1156 .needs_ecx = true, .ecx = 0,
1157 .reg = R_EAX,
1158 },
1159 .tcg_features = ~0U,
1160 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1161 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1162 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1163 XSTATE_PKRU_MASK,
1164 },
1165 [FEAT_XSAVE_COMP_HI] = {
1166 .type = CPUID_FEATURE_WORD,
1167 .cpuid = {
1168 .eax = 0xD,
1169 .needs_ecx = true, .ecx = 0,
1170 .reg = R_EDX,
1171 },
1172 .tcg_features = ~0U,
1173 },
1174 /*Below are MSR exposed features*/
1175 [FEAT_ARCH_CAPABILITIES] = {
1176 .type = MSR_FEATURE_WORD,
1177 .feat_names = {
1178 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1179 "ssb-no", NULL, NULL, NULL,
1180 NULL, NULL, NULL, NULL,
1181 NULL, NULL, NULL, NULL,
1182 NULL, NULL, NULL, NULL,
1183 NULL, NULL, NULL, NULL,
1184 NULL, NULL, NULL, NULL,
1185 NULL, NULL, NULL, NULL,
1186 },
1187 .msr = {
1188 .index = MSR_IA32_ARCH_CAPABILITIES,
1189 .cpuid_dep = {
1190 FEAT_7_0_EDX,
1191 CPUID_7_0_EDX_ARCH_CAPABILITIES
1192 }
1193 },
1194 },
1195};
1196
/*
 * Mapping between one 32-bit x86 register and its QAPI enum value;
 * used to build the x86_reg_info_32[] lookup table below.
 */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
1203
/*
 * REGISTER(reg) expands to a designated initializer for x86_reg_info_32[],
 * pairing the register's stringified name with its QAPI enum constant.
 */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
/* The eight 32-bit general-purpose registers, indexed by the R_* constants */
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
1217
/*
 * Description of one XSAVE state component: the CPUID feature word and
 * bit(s) that enable it, plus its location inside the XSAVE area.
 */
typedef struct ExtSaveArea {
    uint32_t feature, bits;   /* FeatureWord index and required feature bits */
    uint32_t offset, size;    /* byte offset and size within X86XSaveArea */
} ExtSaveArea;
1222
/*
 * Layout of each XSAVE state component, indexed by XSTATE_*_BIT.
 * Each entry records the CPUID feature bit that guards the component
 * and where the component lives inside X86XSaveArea.
 */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
1267
1268static uint32_t xsave_area_size(uint64_t mask)
1269{
1270 int i;
1271 uint64_t ret = 0;
1272
1273 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1274 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1275 if ((mask >> i) & 1) {
1276 ret = MAX(ret, esa->offset + esa->size);
1277 }
1278 }
1279 return ret;
1280}
1281
1282static inline bool accel_uses_host_cpuid(void)
1283{
1284 return kvm_enabled() || hvf_enabled();
1285}
1286
1287static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1288{
1289 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1290 cpu->env.features[FEAT_XSAVE_COMP_LO];
1291}
1292
1293const char *get_register_name_32(unsigned int reg)
1294{
1295 if (reg >= CPU_NB_REGS32) {
1296 return NULL;
1297 }
1298 return x86_reg_info_32[reg].name;
1299}
1300
1301/*
1302 * Returns the set of feature flags that are supported and migratable by
1303 * QEMU, for a given FeatureWord.
1304 */
1305static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1306{
1307 FeatureWordInfo *wi = &feature_word_info[w];
1308 uint32_t r = 0;
1309 int i;
1310
1311 for (i = 0; i < 32; i++) {
1312 uint32_t f = 1U << i;
1313
1314 /* If the feature name is known, it is implicitly considered migratable,
1315 * unless it is explicitly set in unmigratable_flags */
1316 if ((wi->migratable_flags & f) ||
1317 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1318 r |= f;
1319 }
1320 }
1321 return r;
1322}
1323
1324void host_cpuid(uint32_t function, uint32_t count,
1325 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1326{
1327 uint32_t vec[4];
1328
1329#ifdef __x86_64__
1330 asm volatile("cpuid"
1331 : "=a"(vec[0]), "=b"(vec[1]),
1332 "=c"(vec[2]), "=d"(vec[3])
1333 : "0"(function), "c"(count) : "cc");
1334#elif defined(__i386__)
1335 asm volatile("pusha \n\t"
1336 "cpuid \n\t"
1337 "mov %%eax, 0(%2) \n\t"
1338 "mov %%ebx, 4(%2) \n\t"
1339 "mov %%ecx, 8(%2) \n\t"
1340 "mov %%edx, 12(%2) \n\t"
1341 "popa"
1342 : : "a"(function), "c"(count), "S"(vec)
1343 : "memory", "cc");
1344#else
1345 abort();
1346#endif
1347
1348 if (eax)
1349 *eax = vec[0];
1350 if (ebx)
1351 *ebx = vec[1];
1352 if (ecx)
1353 *ecx = vec[2];
1354 if (edx)
1355 *edx = vec[3];
1356}
1357
1358void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1359{
1360 uint32_t eax, ebx, ecx, edx;
1361
1362 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1363 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1364
1365 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1366 if (family) {
1367 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1368 }
1369 if (model) {
1370 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1371 }
1372 if (stepping) {
1373 *stepping = eax & 0x0F;
1374 }
1375}
1376
1377/* CPU class name definitions: */
1378
/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string (with g_free()).
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
1386
1387static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1388{
1389 ObjectClass *oc;
1390 char *typename = x86_cpu_type_name(cpu_model);
1391 oc = object_class_by_name(typename);
1392 g_free(typename);
1393 return oc;
1394}
1395
1396static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1397{
1398 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1399 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1400 return g_strndup(class_name,
1401 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1402}
1403
/*
 * Static definition of one built-in CPU model; the entries of
 * builtin_x86_defs[] below are instances of this struct.
 */
struct X86CPUDefinition {
    const char *name;     /* CPU model name, e.g. "qemu64" */
    uint32_t level;       /* CPUID level */
    uint32_t xlevel;      /* extended CPUID level (0x8000xxxx leaves) */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;   /* feature bits, indexed by FeatureWord */
    const char *model_id;        /* human-readable model ID string */
    CPUCaches *cache_info;       /* cache topology; may be NULL */
};
1417
/* Cache topology (L1D/L1I/L2/L3) used by the AMD EPYC CPU model */
static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 8192,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};
1467
1468static X86CPUDefinition builtin_x86_defs[] = {
1469 {
1470 .name = "qemu64",
1471 .level = 0xd,
1472 .vendor = CPUID_VENDOR_AMD,
1473 .family = 6,
1474 .model = 6,
1475 .stepping = 3,
1476 .features[FEAT_1_EDX] =
1477 PPRO_FEATURES |
1478 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1479 CPUID_PSE36,
1480 .features[FEAT_1_ECX] =
1481 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1482 .features[FEAT_8000_0001_EDX] =
1483 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1484 .features[FEAT_8000_0001_ECX] =
1485 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1486 .xlevel = 0x8000000A,
1487 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1488 },
1489 {
1490 .name = "phenom",
1491 .level = 5,
1492 .vendor = CPUID_VENDOR_AMD,
1493 .family = 16,
1494 .model = 2,
1495 .stepping = 3,
1496 /* Missing: CPUID_HT */
1497 .features[FEAT_1_EDX] =
1498 PPRO_FEATURES |
1499 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1500 CPUID_PSE36 | CPUID_VME,
1501 .features[FEAT_1_ECX] =
1502 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1503 CPUID_EXT_POPCNT,
1504 .features[FEAT_8000_0001_EDX] =
1505 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1506 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1507 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1508 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1509 CPUID_EXT3_CR8LEG,
1510 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1511 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1512 .features[FEAT_8000_0001_ECX] =
1513 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1514 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1515 /* Missing: CPUID_SVM_LBRV */
1516 .features[FEAT_SVM] =
1517 CPUID_SVM_NPT,
1518 .xlevel = 0x8000001A,
1519 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1520 },
1521 {
1522 .name = "core2duo",
1523 .level = 10,
1524 .vendor = CPUID_VENDOR_INTEL,
1525 .family = 6,
1526 .model = 15,
1527 .stepping = 11,
1528 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1529 .features[FEAT_1_EDX] =
1530 PPRO_FEATURES |
1531 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1532 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1533 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1534 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1535 .features[FEAT_1_ECX] =
1536 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1537 CPUID_EXT_CX16,
1538 .features[FEAT_8000_0001_EDX] =
1539 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1540 .features[FEAT_8000_0001_ECX] =
1541 CPUID_EXT3_LAHF_LM,
1542 .xlevel = 0x80000008,
1543 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1544 },
1545 {
1546 .name = "kvm64",
1547 .level = 0xd,
1548 .vendor = CPUID_VENDOR_INTEL,
1549 .family = 15,
1550 .model = 6,
1551 .stepping = 1,
1552 /* Missing: CPUID_HT */
1553 .features[FEAT_1_EDX] =
1554 PPRO_FEATURES | CPUID_VME |
1555 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1556 CPUID_PSE36,
1557 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1558 .features[FEAT_1_ECX] =
1559 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1560 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1561 .features[FEAT_8000_0001_EDX] =
1562 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1563 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1564 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1565 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1566 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1567 .features[FEAT_8000_0001_ECX] =
1568 0,
1569 .xlevel = 0x80000008,
1570 .model_id = "Common KVM processor"
1571 },
1572 {
1573 .name = "qemu32",
1574 .level = 4,
1575 .vendor = CPUID_VENDOR_INTEL,
1576 .family = 6,
1577 .model = 6,
1578 .stepping = 3,
1579 .features[FEAT_1_EDX] =
1580 PPRO_FEATURES,
1581 .features[FEAT_1_ECX] =
1582 CPUID_EXT_SSE3,
1583 .xlevel = 0x80000004,
1584 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1585 },
1586 {
1587 .name = "kvm32",
1588 .level = 5,
1589 .vendor = CPUID_VENDOR_INTEL,
1590 .family = 15,
1591 .model = 6,
1592 .stepping = 1,
1593 .features[FEAT_1_EDX] =
1594 PPRO_FEATURES | CPUID_VME |
1595 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1596 .features[FEAT_1_ECX] =
1597 CPUID_EXT_SSE3,
1598 .features[FEAT_8000_0001_ECX] =
1599 0,
1600 .xlevel = 0x80000008,
1601 .model_id = "Common 32-bit KVM processor"
1602 },
1603 {
1604 .name = "coreduo",
1605 .level = 10,
1606 .vendor = CPUID_VENDOR_INTEL,
1607 .family = 6,
1608 .model = 14,
1609 .stepping = 8,
1610 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1611 .features[FEAT_1_EDX] =
1612 PPRO_FEATURES | CPUID_VME |
1613 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1614 CPUID_SS,
1615 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1616 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1617 .features[FEAT_1_ECX] =
1618 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1619 .features[FEAT_8000_0001_EDX] =
1620 CPUID_EXT2_NX,
1621 .xlevel = 0x80000008,
1622 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1623 },
1624 {
1625 .name = "486",
1626 .level = 1,
1627 .vendor = CPUID_VENDOR_INTEL,
1628 .family = 4,
1629 .model = 8,
1630 .stepping = 0,
1631 .features[FEAT_1_EDX] =
1632 I486_FEATURES,
1633 .xlevel = 0,
1634 .model_id = "",
1635 },
1636 {
1637 .name = "pentium",
1638 .level = 1,
1639 .vendor = CPUID_VENDOR_INTEL,
1640 .family = 5,
1641 .model = 4,
1642 .stepping = 3,
1643 .features[FEAT_1_EDX] =
1644 PENTIUM_FEATURES,
1645 .xlevel = 0,
1646 .model_id = "",
1647 },
1648 {
1649 .name = "pentium2",
1650 .level = 2,
1651 .vendor = CPUID_VENDOR_INTEL,
1652 .family = 6,
1653 .model = 5,
1654 .stepping = 2,
1655 .features[FEAT_1_EDX] =
1656 PENTIUM2_FEATURES,
1657 .xlevel = 0,
1658 .model_id = "",
1659 },
1660 {
1661 .name = "pentium3",
1662 .level = 3,
1663 .vendor = CPUID_VENDOR_INTEL,
1664 .family = 6,
1665 .model = 7,
1666 .stepping = 3,
1667 .features[FEAT_1_EDX] =
1668 PENTIUM3_FEATURES,
1669 .xlevel = 0,
1670 .model_id = "",
1671 },
1672 {
1673 .name = "athlon",
1674 .level = 2,
1675 .vendor = CPUID_VENDOR_AMD,
1676 .family = 6,
1677 .model = 2,
1678 .stepping = 3,
1679 .features[FEAT_1_EDX] =
1680 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1681 CPUID_MCA,
1682 .features[FEAT_8000_0001_EDX] =
1683 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1684 .xlevel = 0x80000008,
1685 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1686 },
1687 {
1688 .name = "n270",
1689 .level = 10,
1690 .vendor = CPUID_VENDOR_INTEL,
1691 .family = 6,
1692 .model = 28,
1693 .stepping = 2,
1694 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1695 .features[FEAT_1_EDX] =
1696 PPRO_FEATURES |
1697 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1698 CPUID_ACPI | CPUID_SS,
1699 /* Some CPUs got no CPUID_SEP */
1700 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1701 * CPUID_EXT_XTPR */
1702 .features[FEAT_1_ECX] =
1703 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1704 CPUID_EXT_MOVBE,
1705 .features[FEAT_8000_0001_EDX] =
1706 CPUID_EXT2_NX,
1707 .features[FEAT_8000_0001_ECX] =
1708 CPUID_EXT3_LAHF_LM,
1709 .xlevel = 0x80000008,
1710 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1711 },
1712 {
1713 .name = "Conroe",
1714 .level = 10,
1715 .vendor = CPUID_VENDOR_INTEL,
1716 .family = 6,
1717 .model = 15,
1718 .stepping = 3,
1719 .features[FEAT_1_EDX] =
1720 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1721 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1722 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1723 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1724 CPUID_DE | CPUID_FP87,
1725 .features[FEAT_1_ECX] =
1726 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1727 .features[FEAT_8000_0001_EDX] =
1728 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1729 .features[FEAT_8000_0001_ECX] =
1730 CPUID_EXT3_LAHF_LM,
1731 .xlevel = 0x80000008,
1732 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1733 },
1734 {
1735 .name = "Penryn",
1736 .level = 10,
1737 .vendor = CPUID_VENDOR_INTEL,
1738 .family = 6,
1739 .model = 23,
1740 .stepping = 3,
1741 .features[FEAT_1_EDX] =
1742 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1743 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1744 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1745 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1746 CPUID_DE | CPUID_FP87,
1747 .features[FEAT_1_ECX] =
1748 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1749 CPUID_EXT_SSE3,
1750 .features[FEAT_8000_0001_EDX] =
1751 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1752 .features[FEAT_8000_0001_ECX] =
1753 CPUID_EXT3_LAHF_LM,
1754 .xlevel = 0x80000008,
1755 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1756 },
1757 {
1758 .name = "Nehalem",
1759 .level = 11,
1760 .vendor = CPUID_VENDOR_INTEL,
1761 .family = 6,
1762 .model = 26,
1763 .stepping = 3,
1764 .features[FEAT_1_EDX] =
1765 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1766 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1767 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1768 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1769 CPUID_DE | CPUID_FP87,
1770 .features[FEAT_1_ECX] =
1771 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1772 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1773 .features[FEAT_8000_0001_EDX] =
1774 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1775 .features[FEAT_8000_0001_ECX] =
1776 CPUID_EXT3_LAHF_LM,
1777 .xlevel = 0x80000008,
1778 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1779 },
1780 {
1781 .name = "Nehalem-IBRS",
1782 .level = 11,
1783 .vendor = CPUID_VENDOR_INTEL,
1784 .family = 6,
1785 .model = 26,
1786 .stepping = 3,
1787 .features[FEAT_1_EDX] =
1788 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1789 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1790 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1791 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1792 CPUID_DE | CPUID_FP87,
1793 .features[FEAT_1_ECX] =
1794 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1795 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1796 .features[FEAT_7_0_EDX] =
1797 CPUID_7_0_EDX_SPEC_CTRL,
1798 .features[FEAT_8000_0001_EDX] =
1799 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1800 .features[FEAT_8000_0001_ECX] =
1801 CPUID_EXT3_LAHF_LM,
1802 .xlevel = 0x80000008,
1803 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1804 },
1805 {
1806 .name = "Westmere",
1807 .level = 11,
1808 .vendor = CPUID_VENDOR_INTEL,
1809 .family = 6,
1810 .model = 44,
1811 .stepping = 1,
1812 .features[FEAT_1_EDX] =
1813 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1814 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1815 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1816 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1817 CPUID_DE | CPUID_FP87,
1818 .features[FEAT_1_ECX] =
1819 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1820 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1821 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1822 .features[FEAT_8000_0001_EDX] =
1823 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1824 .features[FEAT_8000_0001_ECX] =
1825 CPUID_EXT3_LAHF_LM,
1826 .features[FEAT_6_EAX] =
1827 CPUID_6_EAX_ARAT,
1828 .xlevel = 0x80000008,
1829 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1830 },
1831 {
1832 .name = "Westmere-IBRS",
1833 .level = 11,
1834 .vendor = CPUID_VENDOR_INTEL,
1835 .family = 6,
1836 .model = 44,
1837 .stepping = 1,
1838 .features[FEAT_1_EDX] =
1839 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1840 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1841 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1842 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1843 CPUID_DE | CPUID_FP87,
1844 .features[FEAT_1_ECX] =
1845 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1846 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1847 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1848 .features[FEAT_8000_0001_EDX] =
1849 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1850 .features[FEAT_8000_0001_ECX] =
1851 CPUID_EXT3_LAHF_LM,
1852 .features[FEAT_7_0_EDX] =
1853 CPUID_7_0_EDX_SPEC_CTRL,
1854 .features[FEAT_6_EAX] =
1855 CPUID_6_EAX_ARAT,
1856 .xlevel = 0x80000008,
1857 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1858 },
1859 {
1860 .name = "SandyBridge",
1861 .level = 0xd,
1862 .vendor = CPUID_VENDOR_INTEL,
1863 .family = 6,
1864 .model = 42,
1865 .stepping = 1,
1866 .features[FEAT_1_EDX] =
1867 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1868 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1869 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1870 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1871 CPUID_DE | CPUID_FP87,
1872 .features[FEAT_1_ECX] =
1873 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1874 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1875 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1876 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1877 CPUID_EXT_SSE3,
1878 .features[FEAT_8000_0001_EDX] =
1879 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1880 CPUID_EXT2_SYSCALL,
1881 .features[FEAT_8000_0001_ECX] =
1882 CPUID_EXT3_LAHF_LM,
1883 .features[FEAT_XSAVE] =
1884 CPUID_XSAVE_XSAVEOPT,
1885 .features[FEAT_6_EAX] =
1886 CPUID_6_EAX_ARAT,
1887 .xlevel = 0x80000008,
1888 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1889 },
1890 {
/* Sandy Bridge (family 6, model 42) variant: identical to SandyBridge
 * except that CPUID.(EAX=7,ECX=0):EDX additionally advertises SPEC_CTRL
 * (IBRS) to the guest.
 */
1891 .name = "SandyBridge-IBRS",
1892 .level = 0xd,
1893 .vendor = CPUID_VENDOR_INTEL,
1894 .family = 6,
1895 .model = 42,
1896 .stepping = 1,
1897 .features[FEAT_1_EDX] =
1898 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1899 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1900 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1901 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1902 CPUID_DE | CPUID_FP87,
1903 .features[FEAT_1_ECX] =
1904 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1905 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1906 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1907 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1908 CPUID_EXT_SSE3,
1909 .features[FEAT_8000_0001_EDX] =
1910 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1911 CPUID_EXT2_SYSCALL,
1912 .features[FEAT_8000_0001_ECX] =
1913 CPUID_EXT3_LAHF_LM,
/* The only delta from the plain SandyBridge model. */
1914 .features[FEAT_7_0_EDX] =
1915 CPUID_7_0_EDX_SPEC_CTRL,
1916 .features[FEAT_XSAVE] =
1917 CPUID_XSAVE_XSAVEOPT,
1918 .features[FEAT_6_EAX] =
1919 CPUID_6_EAX_ARAT,
1920 .xlevel = 0x80000008,
1921 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1922 },
1923 {
/* Ivy Bridge (family 6, model 58): SandyBridge plus F16C, RDRAND and
 * the first CPUID.7 leaf bits (FSGSBASE, SMEP, ERMS).
 */
1924 .name = "IvyBridge",
1925 .level = 0xd,
1926 .vendor = CPUID_VENDOR_INTEL,
1927 .family = 6,
1928 .model = 58,
1929 .stepping = 9,
1930 .features[FEAT_1_EDX] =
1931 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1932 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1933 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1934 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1935 CPUID_DE | CPUID_FP87,
1936 .features[FEAT_1_ECX] =
1937 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1938 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1939 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1940 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1941 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1942 .features[FEAT_7_0_EBX] =
1943 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1944 CPUID_7_0_EBX_ERMS,
1945 .features[FEAT_8000_0001_EDX] =
1946 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1947 CPUID_EXT2_SYSCALL,
1948 .features[FEAT_8000_0001_ECX] =
1949 CPUID_EXT3_LAHF_LM,
1950 .features[FEAT_XSAVE] =
1951 CPUID_XSAVE_XSAVEOPT,
1952 .features[FEAT_6_EAX] =
1953 CPUID_6_EAX_ARAT,
1954 .xlevel = 0x80000008,
1955 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1956 },
1957 {
/* IvyBridge plus SPEC_CTRL (IBRS) in CPUID.(EAX=7,ECX=0):EDX;
 * otherwise identical to the IvyBridge model above.
 */
1958 .name = "IvyBridge-IBRS",
1959 .level = 0xd,
1960 .vendor = CPUID_VENDOR_INTEL,
1961 .family = 6,
1962 .model = 58,
1963 .stepping = 9,
1964 .features[FEAT_1_EDX] =
1965 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1966 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1967 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1968 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1969 CPUID_DE | CPUID_FP87,
1970 .features[FEAT_1_ECX] =
1971 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1972 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1973 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1974 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1975 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1976 .features[FEAT_7_0_EBX] =
1977 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1978 CPUID_7_0_EBX_ERMS,
1979 .features[FEAT_8000_0001_EDX] =
1980 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1981 CPUID_EXT2_SYSCALL,
1982 .features[FEAT_8000_0001_ECX] =
1983 CPUID_EXT3_LAHF_LM,
/* The only delta from the plain IvyBridge model. */
1984 .features[FEAT_7_0_EDX] =
1985 CPUID_7_0_EDX_SPEC_CTRL,
1986 .features[FEAT_XSAVE] =
1987 CPUID_XSAVE_XSAVEOPT,
1988 .features[FEAT_6_EAX] =
1989 CPUID_6_EAX_ARAT,
1990 .xlevel = 0x80000008,
1991 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1992 },
1993 {
/* Haswell (family 6, model 60) without TSX: FEAT_7_0_EBX deliberately
 * omits HLE and RTM (present in the plain Haswell model below).
 */
1994 .name = "Haswell-noTSX",
1995 .level = 0xd,
1996 .vendor = CPUID_VENDOR_INTEL,
1997 .family = 6,
1998 .model = 60,
1999 .stepping = 1,
2000 .features[FEAT_1_EDX] =
2001 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2002 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2003 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2004 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2005 CPUID_DE | CPUID_FP87,
2006 .features[FEAT_1_ECX] =
2007 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2008 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2009 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2010 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2011 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2012 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2013 .features[FEAT_8000_0001_EDX] =
2014 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2015 CPUID_EXT2_SYSCALL,
2016 .features[FEAT_8000_0001_ECX] =
2017 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2018 .features[FEAT_7_0_EBX] =
2019 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2020 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2021 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
2022 .features[FEAT_XSAVE] =
2023 CPUID_XSAVE_XSAVEOPT,
2024 .features[FEAT_6_EAX] =
2025 CPUID_6_EAX_ARAT,
2026 .xlevel = 0x80000008,
2027 .model_id = "Intel Core Processor (Haswell, no TSX)",
2028 },
2029 {
/* Haswell-noTSX plus SPEC_CTRL (IBRS) in CPUID.(EAX=7,ECX=0):EDX. */
2030 .name = "Haswell-noTSX-IBRS",
2031 .level = 0xd,
2032 .vendor = CPUID_VENDOR_INTEL,
2033 .family = 6,
2034 .model = 60,
2035 .stepping = 1,
2036 .features[FEAT_1_EDX] =
2037 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2038 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2039 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2040 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2041 CPUID_DE | CPUID_FP87,
2042 .features[FEAT_1_ECX] =
2043 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2044 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2045 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2046 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2047 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2048 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2049 .features[FEAT_8000_0001_EDX] =
2050 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2051 CPUID_EXT2_SYSCALL,
2052 .features[FEAT_8000_0001_ECX] =
2053 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
/* The only delta from Haswell-noTSX. */
2054 .features[FEAT_7_0_EDX] =
2055 CPUID_7_0_EDX_SPEC_CTRL,
2056 .features[FEAT_7_0_EBX] =
2057 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2058 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2059 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
2060 .features[FEAT_XSAVE] =
2061 CPUID_XSAVE_XSAVEOPT,
2062 .features[FEAT_6_EAX] =
2063 CPUID_6_EAX_ARAT,
2064 .xlevel = 0x80000008,
2065 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
2066 },
2067 {
/* Haswell (family 6, model 60, stepping 4) with TSX: FEAT_7_0_EBX
 * includes HLE and RTM, unlike the noTSX variants above.
 */
2068 .name = "Haswell",
2069 .level = 0xd,
2070 .vendor = CPUID_VENDOR_INTEL,
2071 .family = 6,
2072 .model = 60,
2073 .stepping = 4,
2074 .features[FEAT_1_EDX] =
2075 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2076 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2077 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2078 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2079 CPUID_DE | CPUID_FP87,
2080 .features[FEAT_1_ECX] =
2081 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2082 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2083 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2084 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2085 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2086 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2087 .features[FEAT_8000_0001_EDX] =
2088 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2089 CPUID_EXT2_SYSCALL,
2090 .features[FEAT_8000_0001_ECX] =
2091 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2092 .features[FEAT_7_0_EBX] =
2093 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2094 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2095 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2096 CPUID_7_0_EBX_RTM,
2097 .features[FEAT_XSAVE] =
2098 CPUID_XSAVE_XSAVEOPT,
2099 .features[FEAT_6_EAX] =
2100 CPUID_6_EAX_ARAT,
2101 .xlevel = 0x80000008,
2102 .model_id = "Intel Core Processor (Haswell)",
2103 },
2104 {
/* Haswell plus SPEC_CTRL (IBRS) in CPUID.(EAX=7,ECX=0):EDX. */
2105 .name = "Haswell-IBRS",
2106 .level = 0xd,
2107 .vendor = CPUID_VENDOR_INTEL,
2108 .family = 6,
2109 .model = 60,
2110 .stepping = 4,
2111 .features[FEAT_1_EDX] =
2112 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2113 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2114 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2115 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2116 CPUID_DE | CPUID_FP87,
2117 .features[FEAT_1_ECX] =
2118 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2119 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2120 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2121 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2122 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2123 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2124 .features[FEAT_8000_0001_EDX] =
2125 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2126 CPUID_EXT2_SYSCALL,
2127 .features[FEAT_8000_0001_ECX] =
2128 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
/* The only delta from the plain Haswell model. */
2129 .features[FEAT_7_0_EDX] =
2130 CPUID_7_0_EDX_SPEC_CTRL,
2131 .features[FEAT_7_0_EBX] =
2132 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2133 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2134 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2135 CPUID_7_0_EBX_RTM,
2136 .features[FEAT_XSAVE] =
2137 CPUID_XSAVE_XSAVEOPT,
2138 .features[FEAT_6_EAX] =
2139 CPUID_6_EAX_ARAT,
2140 .xlevel = 0x80000008,
2141 .model_id = "Intel Core Processor (Haswell, IBRS)",
2142 },
2143 {
/* Broadwell (family 6, model 61) without TSX: adds 3DNOWPREFETCH,
 * RDSEED, ADX and SMAP over Haswell; omits HLE/RTM.
 */
2144 .name = "Broadwell-noTSX",
2145 .level = 0xd,
2146 .vendor = CPUID_VENDOR_INTEL,
2147 .family = 6,
2148 .model = 61,
2149 .stepping = 2,
2150 .features[FEAT_1_EDX] =
2151 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2152 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2153 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2154 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2155 CPUID_DE | CPUID_FP87,
2156 .features[FEAT_1_ECX] =
2157 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2158 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2159 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2160 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2161 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2162 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2163 .features[FEAT_8000_0001_EDX] =
2164 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2165 CPUID_EXT2_SYSCALL,
2166 .features[FEAT_8000_0001_ECX] =
2167 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2168 .features[FEAT_7_0_EBX] =
2169 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2170 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2171 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2172 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2173 CPUID_7_0_EBX_SMAP,
2174 .features[FEAT_XSAVE] =
2175 CPUID_XSAVE_XSAVEOPT,
2176 .features[FEAT_6_EAX] =
2177 CPUID_6_EAX_ARAT,
2178 .xlevel = 0x80000008,
2179 .model_id = "Intel Core Processor (Broadwell, no TSX)",
2180 },
2181 {
/* Broadwell-noTSX plus SPEC_CTRL (IBRS) in CPUID.(EAX=7,ECX=0):EDX. */
2182 .name = "Broadwell-noTSX-IBRS",
2183 .level = 0xd,
2184 .vendor = CPUID_VENDOR_INTEL,
2185 .family = 6,
2186 .model = 61,
2187 .stepping = 2,
2188 .features[FEAT_1_EDX] =
2189 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2190 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2191 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2192 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2193 CPUID_DE | CPUID_FP87,
2194 .features[FEAT_1_ECX] =
2195 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2196 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2197 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2198 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2199 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2200 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2201 .features[FEAT_8000_0001_EDX] =
2202 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2203 CPUID_EXT2_SYSCALL,
2204 .features[FEAT_8000_0001_ECX] =
2205 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
/* The only delta from Broadwell-noTSX. */
2206 .features[FEAT_7_0_EDX] =
2207 CPUID_7_0_EDX_SPEC_CTRL,
2208 .features[FEAT_7_0_EBX] =
2209 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2210 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2211 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2212 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2213 CPUID_7_0_EBX_SMAP,
2214 .features[FEAT_XSAVE] =
2215 CPUID_XSAVE_XSAVEOPT,
2216 .features[FEAT_6_EAX] =
2217 CPUID_6_EAX_ARAT,
2218 .xlevel = 0x80000008,
2219 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
2220 },
2221 {
/* Broadwell (family 6, model 61, stepping 2) with TSX (HLE + RTM). */
2222 .name = "Broadwell",
2223 .level = 0xd,
2224 .vendor = CPUID_VENDOR_INTEL,
2225 .family = 6,
2226 .model = 61,
2227 .stepping = 2,
2228 .features[FEAT_1_EDX] =
2229 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2230 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2231 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2232 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2233 CPUID_DE | CPUID_FP87,
2234 .features[FEAT_1_ECX] =
2235 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2236 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2237 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2238 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2239 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2240 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2241 .features[FEAT_8000_0001_EDX] =
2242 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2243 CPUID_EXT2_SYSCALL,
2244 .features[FEAT_8000_0001_ECX] =
2245 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2246 .features[FEAT_7_0_EBX] =
2247 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2248 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2249 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2250 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2251 CPUID_7_0_EBX_SMAP,
2252 .features[FEAT_XSAVE] =
2253 CPUID_XSAVE_XSAVEOPT,
2254 .features[FEAT_6_EAX] =
2255 CPUID_6_EAX_ARAT,
2256 .xlevel = 0x80000008,
2257 .model_id = "Intel Core Processor (Broadwell)",
2258 },
2259 {
/* Broadwell plus SPEC_CTRL (IBRS) in CPUID.(EAX=7,ECX=0):EDX. */
2260 .name = "Broadwell-IBRS",
2261 .level = 0xd,
2262 .vendor = CPUID_VENDOR_INTEL,
2263 .family = 6,
2264 .model = 61,
2265 .stepping = 2,
2266 .features[FEAT_1_EDX] =
2267 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2268 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2269 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2270 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2271 CPUID_DE | CPUID_FP87,
2272 .features[FEAT_1_ECX] =
2273 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2274 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2275 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2276 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2277 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2278 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2279 .features[FEAT_8000_0001_EDX] =
2280 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2281 CPUID_EXT2_SYSCALL,
2282 .features[FEAT_8000_0001_ECX] =
2283 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
/* The only delta from the plain Broadwell model. */
2284 .features[FEAT_7_0_EDX] =
2285 CPUID_7_0_EDX_SPEC_CTRL,
2286 .features[FEAT_7_0_EBX] =
2287 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2288 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2289 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2290 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2291 CPUID_7_0_EBX_SMAP,
2292 .features[FEAT_XSAVE] =
2293 CPUID_XSAVE_XSAVEOPT,
2294 .features[FEAT_6_EAX] =
2295 CPUID_6_EAX_ARAT,
2296 .xlevel = 0x80000008,
2297 .model_id = "Intel Core Processor (Broadwell, IBRS)",
2298 },
2299 {
/* Skylake client (family 6, model 94): Broadwell plus MPX and the
 * XSAVEC/XGETBV1 XSAVE sub-features.
 */
2300 .name = "Skylake-Client",
2301 .level = 0xd,
2302 .vendor = CPUID_VENDOR_INTEL,
2303 .family = 6,
2304 .model = 94,
2305 .stepping = 3,
2306 .features[FEAT_1_EDX] =
2307 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2308 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2309 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2310 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2311 CPUID_DE | CPUID_FP87,
2312 .features[FEAT_1_ECX] =
2313 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2314 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2315 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2316 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2317 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2318 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2319 .features[FEAT_8000_0001_EDX] =
2320 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2321 CPUID_EXT2_SYSCALL,
2322 .features[FEAT_8000_0001_ECX] =
2323 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2324 .features[FEAT_7_0_EBX] =
2325 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2326 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2327 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2328 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2329 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2330 /* Missing: XSAVES (not supported by some Linux versions,
2331 * including v4.1 to v4.12).
2332 * KVM doesn't yet expose any XSAVES state save component,
2333 * and the only one defined in Skylake (processor tracing)
2334 * probably will block migration anyway.
2335 */
2336 .features[FEAT_XSAVE] =
2337 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2338 CPUID_XSAVE_XGETBV1,
2339 .features[FEAT_6_EAX] =
2340 CPUID_6_EAX_ARAT,
2341 .xlevel = 0x80000008,
2342 .model_id = "Intel Core Processor (Skylake)",
2343 },
2344 {
/* Skylake-Client plus SPEC_CTRL (IBRS) in CPUID.(EAX=7,ECX=0):EDX. */
2345 .name = "Skylake-Client-IBRS",
2346 .level = 0xd,
2347 .vendor = CPUID_VENDOR_INTEL,
2348 .family = 6,
2349 .model = 94,
2350 .stepping = 3,
2351 .features[FEAT_1_EDX] =
2352 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2353 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2354 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2355 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2356 CPUID_DE | CPUID_FP87,
2357 .features[FEAT_1_ECX] =
2358 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2359 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2360 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2361 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2362 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2363 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2364 .features[FEAT_8000_0001_EDX] =
2365 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2366 CPUID_EXT2_SYSCALL,
2367 .features[FEAT_8000_0001_ECX] =
2368 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
/* The only delta from the plain Skylake-Client model. */
2369 .features[FEAT_7_0_EDX] =
2370 CPUID_7_0_EDX_SPEC_CTRL,
2371 .features[FEAT_7_0_EBX] =
2372 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2373 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2374 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2375 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2376 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2377 /* Missing: XSAVES (not supported by some Linux versions,
2378 * including v4.1 to v4.12).
2379 * KVM doesn't yet expose any XSAVES state save component,
2380 * and the only one defined in Skylake (processor tracing)
2381 * probably will block migration anyway.
2382 */
2383 .features[FEAT_XSAVE] =
2384 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2385 CPUID_XSAVE_XGETBV1,
2386 .features[FEAT_6_EAX] =
2387 CPUID_6_EAX_ARAT,
2388 .xlevel = 0x80000008,
2389 .model_id = "Intel Core Processor (Skylake, IBRS)",
2390 },
2391 {
/* Skylake server (family 6, model 85): client Skylake plus 1 GiB
 * pages (PDPE1GB), CLWB, CLFLUSHOPT, the AVX-512 F/DQ/BW/CD/VL set
 * and PKU.
 */
2392 .name = "Skylake-Server",
2393 .level = 0xd,
2394 .vendor = CPUID_VENDOR_INTEL,
2395 .family = 6,
2396 .model = 85,
2397 .stepping = 4,
2398 .features[FEAT_1_EDX] =
2399 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2400 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2401 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2402 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2403 CPUID_DE | CPUID_FP87,
2404 .features[FEAT_1_ECX] =
2405 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2406 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2407 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2408 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2409 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2410 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2411 .features[FEAT_8000_0001_EDX] =
2412 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2413 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2414 .features[FEAT_8000_0001_ECX] =
2415 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2416 .features[FEAT_7_0_EBX] =
2417 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2418 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2419 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2420 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2421 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2422 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2423 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2424 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2425 .features[FEAT_7_0_ECX] =
2426 CPUID_7_0_ECX_PKU,
2427 /* Missing: XSAVES (not supported by some Linux versions,
2428 * including v4.1 to v4.12).
2429 * KVM doesn't yet expose any XSAVES state save component,
2430 * and the only one defined in Skylake (processor tracing)
2431 * probably will block migration anyway.
2432 */
2433 .features[FEAT_XSAVE] =
2434 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2435 CPUID_XSAVE_XGETBV1,
2436 .features[FEAT_6_EAX] =
2437 CPUID_6_EAX_ARAT,
2438 .xlevel = 0x80000008,
2439 .model_id = "Intel Xeon Processor (Skylake)",
2440 },
2441 {
/* Skylake-Server plus SPEC_CTRL (IBRS) in CPUID.(EAX=7,ECX=0):EDX.
 *
 * NOTE(review): unlike Skylake-Server above, FEAT_7_0_EBX here does
 * NOT include CPUID_7_0_EBX_CLFLUSHOPT.  Verify whether this
 * divergence is intentional (guest-visible CPUID of an already
 * shipped model must stay stable for migration) or an oversight when
 * CLFLUSHOPT was added to the non-IBRS variant.
 */
2442 .name = "Skylake-Server-IBRS",
2443 .level = 0xd,
2444 .vendor = CPUID_VENDOR_INTEL,
2445 .family = 6,
2446 .model = 85,
2447 .stepping = 4,
2448 .features[FEAT_1_EDX] =
2449 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2450 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2451 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2452 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2453 CPUID_DE | CPUID_FP87,
2454 .features[FEAT_1_ECX] =
2455 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2456 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2457 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2458 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2459 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2460 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2461 .features[FEAT_8000_0001_EDX] =
2462 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2463 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2464 .features[FEAT_8000_0001_ECX] =
2465 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2466 .features[FEAT_7_0_EDX] =
2467 CPUID_7_0_EDX_SPEC_CTRL,
2468 .features[FEAT_7_0_EBX] =
2469 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2470 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2471 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2472 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2473 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2474 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2475 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2476 CPUID_7_0_EBX_AVX512VL,
2477 .features[FEAT_7_0_ECX] =
2478 CPUID_7_0_ECX_PKU,
2479 /* Missing: XSAVES (not supported by some Linux versions,
2480 * including v4.1 to v4.12).
2481 * KVM doesn't yet expose any XSAVES state save component,
2482 * and the only one defined in Skylake (processor tracing)
2483 * probably will block migration anyway.
2484 */
2485 .features[FEAT_XSAVE] =
2486 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2487 CPUID_XSAVE_XGETBV1,
2488 .features[FEAT_6_EAX] =
2489 CPUID_6_EAX_ARAT,
2490 .xlevel = 0x80000008,
2491 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2492 },
2493 {
/* Cascade Lake Xeon (family 6, model 85, stepping 5):
 * Skylake-Server plus AVX512VNNI and SPEC_CTRL/SSBD.
 *
 * OSPKE is deliberately not listed: it reflects run-time OS
 * enablement of PKU (CR4.PKE), not a static CPU capability, so a
 * model definition must not include it.  INTEL_PT is likewise not
 * listed: KVM cannot reliably expose processor tracing, so having it
 * here makes "-cpu Cascadelake-Server,enforce" fail.
 */
2494 .name = "Cascadelake-Server",
2495 .level = 0xd,
2496 .vendor = CPUID_VENDOR_INTEL,
2497 .family = 6,
2498 .model = 85,
2499 .stepping = 5,
2500 .features[FEAT_1_EDX] =
2501 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2502 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2503 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2504 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2505 CPUID_DE | CPUID_FP87,
2506 .features[FEAT_1_ECX] =
2507 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2508 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2509 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2510 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2511 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2512 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2513 .features[FEAT_8000_0001_EDX] =
2514 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2515 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2516 .features[FEAT_8000_0001_ECX] =
2517 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2518 .features[FEAT_7_0_EBX] =
2519 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2520 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2521 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2522 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2523 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2524 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2525 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2526 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2528 .features[FEAT_7_0_ECX] =
2529 CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_AVX512VNNI,
2531 .features[FEAT_7_0_EDX] =
2532 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2533 /* Missing: XSAVES (not supported by some Linux versions,
2534 * including v4.1 to v4.12).
2535 * KVM doesn't yet expose any XSAVES state save component,
2536 * and the only one defined in Skylake (processor tracing)
2537 * probably will block migration anyway.
2538 */
2539 .features[FEAT_XSAVE] =
2540 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2541 CPUID_XSAVE_XGETBV1,
2542 .features[FEAT_6_EAX] =
2543 CPUID_6_EAX_ARAT,
2544 .xlevel = 0x80000008,
2545 .model_id = "Intel Xeon Processor (Cascadelake)",
2546 },
2547 {
/* Ice Lake client (family 6, model 126): adds WBNOINVD and the
 * VBMI/VBMI2/GFNI/VAES/VPCLMULQDQ/VNNI/BITALG/VPOPCNTDQ CPUID.7 ECX
 * feature set.
 *
 * OSPKE is deliberately not listed: it reflects run-time OS
 * enablement of PKU (CR4.PKE), not a static CPU capability.
 * INTEL_PT is likewise not listed: KVM cannot reliably expose
 * processor tracing, so it would break "-cpu ...,enforce".
 */
2548 .name = "Icelake-Client",
2549 .level = 0xd,
2550 .vendor = CPUID_VENDOR_INTEL,
2551 .family = 6,
2552 .model = 126,
2553 .stepping = 0,
2554 .features[FEAT_1_EDX] =
2555 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2556 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2557 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2558 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2559 CPUID_DE | CPUID_FP87,
2560 .features[FEAT_1_ECX] =
2561 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2562 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2563 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2564 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2565 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2566 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2567 .features[FEAT_8000_0001_EDX] =
2568 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2569 CPUID_EXT2_SYSCALL,
2570 .features[FEAT_8000_0001_ECX] =
2571 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2572 .features[FEAT_8000_0008_EBX] =
2573 CPUID_8000_0008_EBX_WBNOINVD,
2574 .features[FEAT_7_0_EBX] =
2575 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2576 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2577 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2578 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2579 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2580 .features[FEAT_7_0_ECX] =
2581 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2582 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2583 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2584 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2585 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2586 .features[FEAT_7_0_EDX] =
2587 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2588 /* Missing: XSAVES (not supported by some Linux versions,
2589 * including v4.1 to v4.12).
2590 * KVM doesn't yet expose any XSAVES state save component,
2591 * and the only one defined in Skylake (processor tracing)
2592 * probably will block migration anyway.
2593 */
2594 .features[FEAT_XSAVE] =
2595 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2596 CPUID_XSAVE_XGETBV1,
2597 .features[FEAT_6_EAX] =
2598 CPUID_6_EAX_ARAT,
2599 .xlevel = 0x80000008,
2600 .model_id = "Intel Core Processor (Icelake)",
2601 },
2602 {
/* Ice Lake server (family 6, model 134): Icelake-Client plus
 * PDPE1GB, CLWB/CLFLUSHOPT, the AVX-512 F/DQ/BW/CD/VL set and LA57
 * (5-level paging).
 *
 * OSPKE is deliberately not listed: it reflects run-time OS
 * enablement of PKU (CR4.PKE), not a static CPU capability.
 * INTEL_PT and PCONFIG are likewise not listed: neither can be
 * reliably exposed by KVM, so including them would break
 * "-cpu Icelake-Server,enforce".
 */
2603 .name = "Icelake-Server",
2604 .level = 0xd,
2605 .vendor = CPUID_VENDOR_INTEL,
2606 .family = 6,
2607 .model = 134,
2608 .stepping = 0,
2609 .features[FEAT_1_EDX] =
2610 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2611 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2612 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2613 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2614 CPUID_DE | CPUID_FP87,
2615 .features[FEAT_1_ECX] =
2616 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2617 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2618 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2619 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2620 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2621 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2622 .features[FEAT_8000_0001_EDX] =
2623 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2624 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2625 .features[FEAT_8000_0001_ECX] =
2626 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2627 .features[FEAT_8000_0008_EBX] =
2628 CPUID_8000_0008_EBX_WBNOINVD,
2629 .features[FEAT_7_0_EBX] =
2630 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2631 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2632 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2633 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2634 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2635 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2636 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2637 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2639 .features[FEAT_7_0_ECX] =
2640 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2641 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2642 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2643 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2644 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
2645 .features[FEAT_7_0_EDX] =
2646 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2648 /* Missing: XSAVES (not supported by some Linux versions,
2649 * including v4.1 to v4.12).
2650 * KVM doesn't yet expose any XSAVES state save component,
2651 * and the only one defined in Skylake (processor tracing)
2652 * probably will block migration anyway.
2653 */
2654 .features[FEAT_XSAVE] =
2655 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2656 CPUID_XSAVE_XGETBV1,
2657 .features[FEAT_6_EAX] =
2658 CPUID_6_EAX_ARAT,
2659 .xlevel = 0x80000008,
2660 .model_id = "Intel Xeon Processor (Icelake)",
2661 },
2662 {
/* Knights Mill Xeon Phi (family 6, model 133): AVX-512 F/CD plus the
 * Phi-specific PF/ER/4VNNIW/4FMAPS extensions and VPOPCNTDQ.  Also the
 * only Intel model here with CPUID_SS in FEAT_1_EDX.
 */
2663 .name = "KnightsMill",
2664 .level = 0xd,
2665 .vendor = CPUID_VENDOR_INTEL,
2666 .family = 6,
2667 .model = 133,
2668 .stepping = 0,
2669 .features[FEAT_1_EDX] =
2670 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2671 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2672 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2673 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2674 CPUID_PSE | CPUID_DE | CPUID_FP87,
2675 .features[FEAT_1_ECX] =
2676 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2677 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2678 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2679 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2680 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2681 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2682 .features[FEAT_8000_0001_EDX] =
2683 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2684 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2685 .features[FEAT_8000_0001_ECX] =
2686 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2687 .features[FEAT_7_0_EBX] =
2688 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2689 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2690 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2691 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2692 CPUID_7_0_EBX_AVX512ER,
2693 .features[FEAT_7_0_ECX] =
2694 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2695 .features[FEAT_7_0_EDX] =
2696 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2697 .features[FEAT_XSAVE] =
2698 CPUID_XSAVE_XSAVEOPT,
2699 .features[FEAT_6_EAX] =
2700 CPUID_6_EAX_ARAT,
2701 .xlevel = 0x80000008,
2702 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2703 },
2704 {
2705 .name = "Opteron_G1",
2706 .level = 5,
2707 .vendor = CPUID_VENDOR_AMD,
2708 .family = 15,
2709 .model = 6,
2710 .stepping = 1,
2711 .features[FEAT_1_EDX] =
2712 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2713 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2714 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2715 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2716 CPUID_DE | CPUID_FP87,
2717 .features[FEAT_1_ECX] =
2718 CPUID_EXT_SSE3,
2719 .features[FEAT_8000_0001_EDX] =
2720 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2721 .xlevel = 0x80000008,
2722 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2723 },
2724 {
2725 .name = "Opteron_G2",
2726 .level = 5,
2727 .vendor = CPUID_VENDOR_AMD,
2728 .family = 15,
2729 .model = 6,
2730 .stepping = 1,
2731 .features[FEAT_1_EDX] =
2732 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2733 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2734 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2735 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2736 CPUID_DE | CPUID_FP87,
2737 .features[FEAT_1_ECX] =
2738 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2739 .features[FEAT_8000_0001_EDX] =
2740 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2741 .features[FEAT_8000_0001_ECX] =
2742 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2743 .xlevel = 0x80000008,
2744 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2745 },
2746 {
2747 .name = "Opteron_G3",
2748 .level = 5,
2749 .vendor = CPUID_VENDOR_AMD,
2750 .family = 16,
2751 .model = 2,
2752 .stepping = 3,
2753 .features[FEAT_1_EDX] =
2754 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2755 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2756 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2757 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2758 CPUID_DE | CPUID_FP87,
2759 .features[FEAT_1_ECX] =
2760 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2761 CPUID_EXT_SSE3,
2762 .features[FEAT_8000_0001_EDX] =
2763 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
2764 CPUID_EXT2_RDTSCP,
2765 .features[FEAT_8000_0001_ECX] =
2766 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2767 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2768 .xlevel = 0x80000008,
2769 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2770 },
2771 {
2772 .name = "Opteron_G4",
2773 .level = 0xd,
2774 .vendor = CPUID_VENDOR_AMD,
2775 .family = 21,
2776 .model = 1,
2777 .stepping = 2,
2778 .features[FEAT_1_EDX] =
2779 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2780 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2781 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2782 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2783 CPUID_DE | CPUID_FP87,
2784 .features[FEAT_1_ECX] =
2785 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2786 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2787 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2788 CPUID_EXT_SSE3,
2789 .features[FEAT_8000_0001_EDX] =
2790 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2791 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2792 .features[FEAT_8000_0001_ECX] =
2793 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2794 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2795 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2796 CPUID_EXT3_LAHF_LM,
2797 /* no xsaveopt! */
2798 .xlevel = 0x8000001A,
2799 .model_id = "AMD Opteron 62xx class CPU",
2800 },
2801 {
2802 .name = "Opteron_G5",
2803 .level = 0xd,
2804 .vendor = CPUID_VENDOR_AMD,
2805 .family = 21,
2806 .model = 2,
2807 .stepping = 0,
2808 .features[FEAT_1_EDX] =
2809 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2810 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2811 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2812 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2813 CPUID_DE | CPUID_FP87,
2814 .features[FEAT_1_ECX] =
2815 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2816 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2817 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2818 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2819 .features[FEAT_8000_0001_EDX] =
2820 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2821 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2822 .features[FEAT_8000_0001_ECX] =
2823 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2824 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2825 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2826 CPUID_EXT3_LAHF_LM,
2827 /* no xsaveopt! */
2828 .xlevel = 0x8000001A,
2829 .model_id = "AMD Opteron 63xx class CPU",
2830 },
2831 {
2832 .name = "EPYC",
2833 .level = 0xd,
2834 .vendor = CPUID_VENDOR_AMD,
2835 .family = 23,
2836 .model = 1,
2837 .stepping = 2,
2838 .features[FEAT_1_EDX] =
2839 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2840 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2841 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2842 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2843 CPUID_VME | CPUID_FP87,
2844 .features[FEAT_1_ECX] =
2845 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2846 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2847 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2848 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2849 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2850 .features[FEAT_8000_0001_EDX] =
2851 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2852 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2853 CPUID_EXT2_SYSCALL,
2854 .features[FEAT_8000_0001_ECX] =
2855 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2856 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2857 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2858 CPUID_EXT3_TOPOEXT,
2859 .features[FEAT_7_0_EBX] =
2860 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2861 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2862 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2863 CPUID_7_0_EBX_SHA_NI,
2864 /* Missing: XSAVES (not supported by some Linux versions,
2865 * including v4.1 to v4.12).
2866 * KVM doesn't yet expose any XSAVES state save component.
2867 */
2868 .features[FEAT_XSAVE] =
2869 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2870 CPUID_XSAVE_XGETBV1,
2871 .features[FEAT_6_EAX] =
2872 CPUID_6_EAX_ARAT,
2873 .xlevel = 0x8000001E,
2874 .model_id = "AMD EPYC Processor",
2875 .cache_info = &epyc_cache_info,
2876 },
2877 {
2878 .name = "EPYC-IBPB",
2879 .level = 0xd,
2880 .vendor = CPUID_VENDOR_AMD,
2881 .family = 23,
2882 .model = 1,
2883 .stepping = 2,
2884 .features[FEAT_1_EDX] =
2885 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2886 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2887 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2888 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2889 CPUID_VME | CPUID_FP87,
2890 .features[FEAT_1_ECX] =
2891 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2892 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2893 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2894 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2895 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2896 .features[FEAT_8000_0001_EDX] =
2897 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2898 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2899 CPUID_EXT2_SYSCALL,
2900 .features[FEAT_8000_0001_ECX] =
2901 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2902 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2903 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2904 CPUID_EXT3_TOPOEXT,
2905 .features[FEAT_8000_0008_EBX] =
2906 CPUID_8000_0008_EBX_IBPB,
2907 .features[FEAT_7_0_EBX] =
2908 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2909 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2910 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2911 CPUID_7_0_EBX_SHA_NI,
2912 /* Missing: XSAVES (not supported by some Linux versions,
2913 * including v4.1 to v4.12).
2914 * KVM doesn't yet expose any XSAVES state save component.
2915 */
2916 .features[FEAT_XSAVE] =
2917 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2918 CPUID_XSAVE_XGETBV1,
2919 .features[FEAT_6_EAX] =
2920 CPUID_6_EAX_ARAT,
2921 .xlevel = 0x8000001E,
2922 .model_id = "AMD EPYC Processor (with IBPB)",
2923 .cache_info = &epyc_cache_info,
2924 },
2925};
2926
/* A (QOM property name, value) pair; used in NULL-terminated tables of
 * accelerator-specific CPU model defaults (kvm_default_props,
 * tcg_default_props below).
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
2930
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * The { NULL, NULL } sentinel terminates the table; it is relied on by
 * x86_cpu_change_kvm_default() when scanning for an entry.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
2947
/* TCG-specific defaults that override all CPU models when using TCG.
 * Same NULL-terminated PropValue table format as kvm_default_props.
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};
2954
2955
2956void x86_cpu_change_kvm_default(const char *prop, const char *value)
2957{
2958 PropValue *pv;
2959 for (pv = kvm_default_props; pv->prop; pv++) {
2960 if (!strcmp(pv->prop, prop)) {
2961 pv->value = value;
2962 break;
2963 }
2964 }
2965
2966 /* It is valid to call this function only for properties that
2967 * are already present in the kvm_default_props table.
2968 */
2969 assert(pv->prop);
2970}
2971
2972static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2973 bool migratable_only);
2974
2975static bool lmce_supported(void)
2976{
2977 uint64_t mce_cap = 0;
2978
2979#ifdef CONFIG_KVM
2980 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2981 return false;
2982 }
2983#endif
2984
2985 return !!(mce_cap & MCG_LMCE_P);
2986}
2987
2988#define CPUID_MODEL_ID_SZ 48
2989
/**
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string
 * automatically.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    /* The 48-byte brand string lives in leaves 0x80000002..0x80000004,
     * 16 bytes (EAX/EBX/ECX/EDX) per leaf.
     */
    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
3013
/* QOM properties specific to the "max" CPU model. */
static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
3019
3020static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
3021{
3022 DeviceClass *dc = DEVICE_CLASS(oc);
3023 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3024
3025 xcc->ordering = 9;
3026
3027 xcc->model_description =
3028 "Enables all features supported by the accelerator in the current host";
3029
3030 dc->props = max_x86_cpu_properties;
3031}
3032
3033static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
3034
3035static void max_x86_cpu_initfn(Object *obj)
3036{
3037 X86CPU *cpu = X86_CPU(obj);
3038 CPUX86State *env = &cpu->env;
3039 KVMState *s = kvm_state;
3040
3041 /* We can't fill the features array here because we don't know yet if
3042 * "migratable" is true or false.
3043 */
3044 cpu->max_features = true;
3045
3046 if (accel_uses_host_cpuid()) {
3047 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
3048 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
3049 int family, model, stepping;
3050 X86CPUDefinition host_cpudef = { };
3051 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
3052
3053 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
3054 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
3055
3056 host_vendor_fms(vendor, &family, &model, &stepping);
3057
3058 cpu_x86_fill_model_id(model_id);
3059
3060 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
3061 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
3062 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
3063 object_property_set_int(OBJECT(cpu), stepping, "stepping",
3064 &error_abort);
3065 object_property_set_str(OBJECT(cpu), model_id, "model-id",
3066 &error_abort);
3067
3068 if (kvm_enabled()) {
3069 env->cpuid_min_level =
3070 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
3071 env->cpuid_min_xlevel =
3072 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
3073 env->cpuid_min_xlevel2 =
3074 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
3075 } else {
3076 env->cpuid_min_level =
3077 hvf_get_supported_cpuid(0x0, 0, R_EAX);
3078 env->cpuid_min_xlevel =
3079 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
3080 env->cpuid_min_xlevel2 =
3081 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
3082 }
3083
3084 if (lmce_supported()) {
3085 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
3086 }
3087 } else {
3088 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
3089 "vendor", &error_abort);
3090 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
3091 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
3092 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
3093 object_property_set_str(OBJECT(cpu),
3094 "QEMU TCG CPU version " QEMU_HW_VERSION,
3095 "model-id", &error_abort);
3096 }
3097
3098 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
3099}
3100
/* QOM registration for the "max" CPU model. */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
3107
3108#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
3109static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
3110{
3111 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3112
3113 xcc->host_cpuid_required = true;
3114 xcc->ordering = 8;
3115
3116#if defined(CONFIG_KVM)
3117 xcc->model_description =
3118 "KVM processor with all supported host features ";
3119#elif defined(CONFIG_HVF)
3120 xcc->model_description =
3121 "HVF processor with all supported host features ";
3122#endif
3123}
3124
/* "host" is a subclass of "max"; it only overrides class data
 * (host_cpuid_required, ordering, description) via its class_init.
 */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
3130
3131#endif
3132
/* Build a human-readable description of where feature word @f lives,
 * e.g. "CPUID.07H:EBX" for CPUID words or "MSR(10AH)" for MSR words.
 * @bit is currently unused.
 *
 * Returns a newly allocated string; the caller must g_free() it.
 */
static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
{
    assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);

    switch (f->type) {
    case CPUID_FEATURE_WORD:
        {
            const char *reg = get_register_name_32(f->cpuid.reg);
            /* every CPUID feature word must name a valid 32-bit register */
            assert(reg);
            return g_strdup_printf("CPUID.%02XH:%s",
                                   f->cpuid.eax, reg);
        }
    case MSR_FEATURE_WORD:
        return g_strdup_printf("MSR(%02XH)",
                               f->msr.index);
    }

    /* not reached: the assert above covers both enum values */
    return NULL;
}
3152
3153static void report_unavailable_features(FeatureWord w, uint32_t mask)
3154{
3155 FeatureWordInfo *f = &feature_word_info[w];
3156 int i;
3157 char *feat_word_str;
3158
3159 for (i = 0; i < 32; ++i) {
3160 if ((1UL << i) & mask) {
3161 feat_word_str = feature_word_description(f, i);
3162 warn_report("%s doesn't support requested feature: %s%s%s [bit %d]",
3163 accel_uses_host_cpuid() ? "host" : "TCG",
3164 feat_word_str,
3165 f->feat_names[i] ? "." : "",
3166 f->feat_names[i] ? f->feat_names[i] : "", i);
3167 g_free(feat_word_str);
3168 }
3169 }
3170}
3171
3172static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
3173 const char *name, void *opaque,
3174 Error **errp)
3175{
3176 X86CPU *cpu = X86_CPU(obj);
3177 CPUX86State *env = &cpu->env;
3178 int64_t value;
3179
3180 value = (env->cpuid_version >> 8) & 0xf;
3181 if (value == 0xf) {
3182 value += (env->cpuid_version >> 20) & 0xff;
3183 }
3184 visit_type_int(v, name, &value, errp);
3185}
3186
3187static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
3188 const char *name, void *opaque,
3189 Error **errp)
3190{
3191 X86CPU *cpu = X86_CPU(obj);
3192 CPUX86State *env = &cpu->env;
3193 const int64_t min = 0;
3194 const int64_t max = 0xff + 0xf;
3195 Error *local_err = NULL;
3196 int64_t value;
3197
3198 visit_type_int(v, name, &value, &local_err);
3199 if (local_err) {
3200 error_propagate(errp, local_err);
3201 return;
3202 }
3203 if (value < min || value > max) {
3204 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3205 name ? name : "null", value, min, max);
3206 return;
3207 }
3208
3209 env->cpuid_version &= ~0xff00f00;
3210 if (value > 0x0f) {
3211 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
3212 } else {
3213 env->cpuid_version |= value << 8;
3214 }
3215}
3216
3217static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
3218 const char *name, void *opaque,
3219 Error **errp)
3220{
3221 X86CPU *cpu = X86_CPU(obj);
3222 CPUX86State *env = &cpu->env;
3223 int64_t value;
3224
3225 value = (env->cpuid_version >> 4) & 0xf;
3226 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
3227 visit_type_int(v, name, &value, errp);
3228}
3229
3230static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
3231 const char *name, void *opaque,
3232 Error **errp)
3233{
3234 X86CPU *cpu = X86_CPU(obj);
3235 CPUX86State *env = &cpu->env;
3236 const int64_t min = 0;
3237 const int64_t max = 0xff;
3238 Error *local_err = NULL;
3239 int64_t value;
3240
3241 visit_type_int(v, name, &value, &local_err);
3242 if (local_err) {
3243 error_propagate(errp, local_err);
3244 return;
3245 }
3246 if (value < min || value > max) {
3247 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3248 name ? name : "null", value, min, max);
3249 return;
3250 }
3251
3252 env->cpuid_version &= ~0xf00f0;
3253 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
3254}
3255
3256static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
3257 const char *name, void *opaque,
3258 Error **errp)
3259{
3260 X86CPU *cpu = X86_CPU(obj);
3261 CPUX86State *env = &cpu->env;
3262 int64_t value;
3263
3264 value = env->cpuid_version & 0xf;
3265 visit_type_int(v, name, &value, errp);
3266}
3267
3268static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
3269 const char *name, void *opaque,
3270 Error **errp)
3271{
3272 X86CPU *cpu = X86_CPU(obj);
3273 CPUX86State *env = &cpu->env;
3274 const int64_t min = 0;
3275 const int64_t max = 0xf;
3276 Error *local_err = NULL;
3277 int64_t value;
3278
3279 visit_type_int(v, name, &value, &local_err);
3280 if (local_err) {
3281 error_propagate(errp, local_err);
3282 return;
3283 }
3284 if (value < min || value > max) {
3285 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3286 name ? name : "null", value, min, max);
3287 return;
3288 }
3289
3290 env->cpuid_version &= ~0xf;
3291 env->cpuid_version |= value & 0xf;
3292}
3293
3294static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
3295{
3296 X86CPU *cpu = X86_CPU(obj);
3297 CPUX86State *env = &cpu->env;
3298 char *value;
3299
3300 value = g_malloc(CPUID_VENDOR_SZ + 1);
3301 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
3302 env->cpuid_vendor3);
3303 return value;
3304}
3305
3306static void x86_cpuid_set_vendor(Object *obj, const char *value,
3307 Error **errp)
3308{
3309 X86CPU *cpu = X86_CPU(obj);
3310 CPUX86State *env = &cpu->env;
3311 int i;
3312
3313 if (strlen(value) != CPUID_VENDOR_SZ) {
3314 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
3315 return;
3316 }
3317
3318 env->cpuid_vendor1 = 0;
3319 env->cpuid_vendor2 = 0;
3320 env->cpuid_vendor3 = 0;
3321 for (i = 0; i < 4; i++) {
3322 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3323 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3324 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3325 }
3326}
3327
3328static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3329{
3330 X86CPU *cpu = X86_CPU(obj);
3331 CPUX86State *env = &cpu->env;
3332 char *value;
3333 int i;
3334
3335 value = g_malloc(48 + 1);
3336 for (i = 0; i < 48; i++) {
3337 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3338 }
3339 value[48] = '\0';
3340 return value;
3341}
3342
3343static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3344 Error **errp)
3345{
3346 X86CPU *cpu = X86_CPU(obj);
3347 CPUX86State *env = &cpu->env;
3348 int c, len, i;
3349
3350 if (model_id == NULL) {
3351 model_id = "";
3352 }
3353 len = strlen(model_id);
3354 memset(env->cpuid_model, 0, 48);
3355 for (i = 0; i < 48; i++) {
3356 if (i >= len) {
3357 c = '\0';
3358 } else {
3359 c = (uint8_t)model_id[i];
3360 }
3361 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3362 }
3363}
3364
3365static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3366 void *opaque, Error **errp)
3367{
3368 X86CPU *cpu = X86_CPU(obj);
3369 int64_t value;
3370
3371 value = cpu->env.tsc_khz * 1000;
3372 visit_type_int(v, name, &value, errp);
3373}
3374
3375static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3376 void *opaque, Error **errp)
3377{
3378 X86CPU *cpu = X86_CPU(obj);
3379 const int64_t min = 0;
3380 const int64_t max = INT64_MAX;
3381 Error *local_err = NULL;
3382 int64_t value;
3383
3384 visit_type_int(v, name, &value, &local_err);
3385 if (local_err) {
3386 error_propagate(errp, local_err);
3387 return;
3388 }
3389 if (value < min || value > max) {
3390 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3391 name ? name : "null", value, min, max);
3392 return;
3393 }
3394
3395 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
3396}
3397
/* Generic getter for "feature-words" and "filtered-features" properties.
 *
 * @opaque points at the per-CPU array of feature-word values to expose
 * (e.g. cpu->filtered_features). Builds a linked list of
 * X86CPUFeatureWordInfo entries on the stack and hands it to the
 * visitor, which copies it before this function returns.
 */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    /* backing storage for the list: both arrays live on the stack and
     * list entries point into word_infos, so nothing is heap-allocated
     */
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /*
         * We didn't have MSR features when "feature-words" was
         * introduced. Therefore skipped other type entries.
         */
        if (wi->type != CPUID_FEATURE_WORD) {
            continue;
        }
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid.eax;
        qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid.ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
3433
3434static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3435 void *opaque, Error **errp)
3436{
3437 X86CPU *cpu = X86_CPU(obj);
3438 int64_t value = cpu->hyperv_spinlock_attempts;
3439
3440 visit_type_int(v, name, &value, errp);
3441}
3442
3443static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3444 void *opaque, Error **errp)
3445{
3446 const int64_t min = 0xFFF;
3447 const int64_t max = UINT_MAX;
3448 X86CPU *cpu = X86_CPU(obj);
3449 Error *err = NULL;
3450 int64_t value;
3451
3452 visit_type_int(v, name, &value, &err);
3453 if (err) {
3454 error_propagate(errp, err);
3455 return;
3456 }
3457
3458 if (value < min || value > max) {
3459 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
3460 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
3461 object_get_typename(obj), name ? name : "null",
3462 value, min, max);
3463 return;
3464 }
3465 cpu->hyperv_spinlock_attempts = value;
3466}
3467
/* Property type for "hv-spinlocks"; range checking happens in
 * x86_set_hv_spinlocks().
 */
static const PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
3473
/* Convert all '_' in a feature string option name to '-', in place, to
 * make the feature name conform to the QOM property naming rule, which
 * uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = s; *p; p++) {
        if (*p == '_') {
            *p = '-';
        }
    }
}
3483
3484/* Return the feature property name for a feature flag bit */
3485static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
3486{
3487 /* XSAVE components are automatically enabled by other features,
3488 * so return the original feature name instead
3489 */
3490 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
3491 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
3492
3493 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
3494 x86_ext_save_areas[comp].bits) {
3495 w = x86_ext_save_areas[comp].feature;
3496 bitnr = ctz32(x86_ext_save_areas[comp].bits);
3497 }
3498 }
3499
3500 assert(bitnr < 32);
3501 assert(w < FEATURE_WORDS);
3502 return feature_word_info[w].feat_names[bitnr];
3503}
3504
3505/* Compatibily hack to maintain legacy +-feat semantic,
3506 * where +-feat overwrites any feature set by
3507 * feat=on|feat even if the later is parsed after +-feat
3508 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
3509 */
3510static GList *plus_features, *minus_features;
3511
/* GCompareFunc wrapper around g_strcmp0() (NULL-safe strcmp), used with
 * g_list_find_custom() on plus_features/minus_features.
 */
static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}
3516
/* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * Registers one qdev global property per "feature=value" item so that
 * every CPU of @typename gets the setting. "+feat"/"-feat" items are
 * collected into plus_features/minus_features instead (legacy syntax,
 * applied elsewhere), and mixing both spellings for the same feature
 * triggers an ambiguity warning.
 *
 * NOTE: mutates @features in place (strtok and '=' splitting).
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    /* Only the first call registers globals; later calls are no-ops */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "name=value"; a bare "name" means "name=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Warn when the same feature also appeared as "+feat"/"-feat" */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" accepts size suffixes (e.g. "2G") and
         * maps to the "tsc-frequency" property in plain Hz.
         */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}
3606
3607static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3608static int x86_cpu_filter_features(X86CPU *cpu);
3609
/* Check for missing features that may prevent the CPU class from
 * running using the current machine and accelerator.
 *
 * Appends the names of unavailable features to @missing_feats.
 * Two pseudo-entries can also be reported: "kvm" when the model needs
 * host CPUID data but the accelerator cannot provide it, and "type"
 * when feature expansion itself failed.
 */
static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
                                                 strList **missing_feats)
{
    X86CPU *xc;
    FeatureWord w;
    Error *err = NULL;
    strList **next = missing_feats;

    if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("kvm");
        *missing_feats = new;
        return;
    }

    /* Instantiate a throwaway CPU object just to compute its
     * filtered_features; released via object_unref() below.
     */
    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));

    x86_cpu_expand_features(xc, &err);
    if (err) {
        /* Errors at x86_cpu_expand_features should never happen,
         * but in case it does, just report the model as not
         * runnable at all using the "type" property.
         */
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("type");
        *next = new;
        next = &new->next;
    }

    x86_cpu_filter_features(xc);

    /* Each filtered-out bit becomes one missing-feature list entry */
    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t filtered = xc->filtered_features[w];
        int i;
        for (i = 0; i < 32; i++) {
            if (filtered & (1UL << i)) {
                strList *new = g_new0(strList, 1);
                new->value = g_strdup(x86_cpu_feature_name(w, i));
                *next = new;
                next = &new->next;
            }
        }
    }

    object_unref(OBJECT(xc));
}
3659
3660/* Print all cpuid feature names in featureset
3661 */
3662static void listflags(FILE *f, fprintf_function print, GList *features)
3663{
3664 size_t len = 0;
3665 GList *tmp;
3666
3667 for (tmp = features; tmp; tmp = tmp->next) {
3668 const char *name = tmp->data;
3669 if ((len + strlen(name) + 1) >= 75) {
3670 print(f, "\n");
3671 len = 0;
3672 }
3673 print(f, "%s%s", len == 0 ? " " : " ", name);
3674 len += strlen(name) + 1;
3675 }
3676 print(f, "\n");
3677}
3678
3679/* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3680static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3681{
3682 ObjectClass *class_a = (ObjectClass *)a;
3683 ObjectClass *class_b = (ObjectClass *)b;
3684 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3685 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3686 char *name_a, *name_b;
3687 int ret;
3688
3689 if (cc_a->ordering != cc_b->ordering) {
3690 ret = cc_a->ordering - cc_b->ordering;
3691 } else {
3692 name_a = x86_cpu_class_get_model_name(cc_a);
3693 name_b = x86_cpu_class_get_model_name(cc_b);
3694 ret = strcmp(name_a, name_b);
3695 g_free(name_a);
3696 g_free(name_b);
3697 }
3698 return ret;
3699}
3700
3701static GSList *get_sorted_cpu_model_list(void)
3702{
3703 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3704 list = g_slist_sort(list, x86_cpu_list_compare);
3705 return list;
3706}
3707
3708static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3709{
3710 ObjectClass *oc = data;
3711 X86CPUClass *cc = X86_CPU_CLASS(oc);
3712 CPUListState *s = user_data;
3713 char *name = x86_cpu_class_get_model_name(cc);
3714 const char *desc = cc->model_description;
3715 if (!desc && cc->cpu_def) {
3716 desc = cc->cpu_def->model_id;
3717 }
3718
3719 (*s->cpu_fprintf)(s->file, "x86 %-20s %-48s\n",
3720 name, desc);
3721 g_free(name);
3722}
3723
3724/* list available CPU models and flags */
3725void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3726{
3727 int i, j;
3728 CPUListState s = {
3729 .file = f,
3730 .cpu_fprintf = cpu_fprintf,
3731 };
3732 GSList *list;
3733 GList *names = NULL;
3734
3735 (*cpu_fprintf)(f, "Available CPUs:\n");
3736 list = get_sorted_cpu_model_list();
3737 g_slist_foreach(list, x86_cpu_list_entry, &s);
3738 g_slist_free(list);
3739
3740 names = NULL;
3741 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3742 FeatureWordInfo *fw = &feature_word_info[i];
3743 for (j = 0; j < 32; j++) {
3744 if (fw->feat_names[j]) {
3745 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3746 }
3747 }
3748 }
3749
3750 names = g_list_sort(names, (GCompareFunc)strcmp);
3751
3752 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
3753 listflags(f, cpu_fprintf, names);
3754 (*cpu_fprintf)(f, "\n");
3755 g_list_free(names);
3756}
3757
3758static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3759{
3760 ObjectClass *oc = data;
3761 X86CPUClass *cc = X86_CPU_CLASS(oc);
3762 CpuDefinitionInfoList **cpu_list = user_data;
3763 CpuDefinitionInfoList *entry;
3764 CpuDefinitionInfo *info;
3765
3766 info = g_malloc0(sizeof(*info));
3767 info->name = x86_cpu_class_get_model_name(cc);
3768 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3769 info->has_unavailable_features = true;
3770 info->q_typename = g_strdup(object_class_get_name(oc));
3771 info->migration_safe = cc->migration_safe;
3772 info->has_migration_safe = true;
3773 info->q_static = cc->static_model;
3774
3775 entry = g_malloc0(sizeof(*entry));
3776 entry->value = info;
3777 entry->next = *cpu_list;
3778 *cpu_list = entry;
3779}
3780
3781CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
3782{
3783 CpuDefinitionInfoList *cpu_list = NULL;
3784 GSList *list = get_sorted_cpu_model_list();
3785 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3786 g_slist_free(list);
3787 return cpu_list;
3788}
3789
3790static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3791 bool migratable_only)
3792{
3793 FeatureWordInfo *wi = &feature_word_info[w];
3794 uint32_t r = 0;
3795
3796 if (kvm_enabled()) {
3797 switch (wi->type) {
3798 case CPUID_FEATURE_WORD:
3799 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
3800 wi->cpuid.ecx,
3801 wi->cpuid.reg);
3802 break;
3803 case MSR_FEATURE_WORD:
3804 r = kvm_arch_get_supported_msr_feature(kvm_state,
3805 wi->msr.index);
3806 break;
3807 }
3808 } else if (hvf_enabled()) {
3809 if (wi->type != CPUID_FEATURE_WORD) {
3810 return 0;
3811 }
3812 r = hvf_get_supported_cpuid(wi->cpuid.eax,
3813 wi->cpuid.ecx,
3814 wi->cpuid.reg);
3815 } else if (tcg_enabled()) {
3816 r = wi->tcg_features;
3817 } else {
3818 return ~0;
3819 }
3820 if (migratable_only) {
3821 r &= x86_cpu_get_migratable_flags(w);
3822 }
3823 return r;
3824}
3825
3826static void x86_cpu_report_filtered_features(X86CPU *cpu)
3827{
3828 FeatureWord w;
3829
3830 for (w = 0; w < FEATURE_WORDS; w++) {
3831 report_unavailable_features(w, cpu->filtered_features[w]);
3832 }
3833}
3834
3835static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3836{
3837 PropValue *pv;
3838 for (pv = props; pv->prop; pv++) {
3839 if (!pv->value) {
3840 continue;
3841 }
3842 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3843 &error_abort);
3844 }
3845}
3846
/* Load data from X86CPUDefinition into a X86CPU object.
 *
 * Copies the model's minimum CPUID levels, family/model/stepping,
 * model-id, feature words and vendor into @cpu, then applies
 * accelerator-specific default property overrides.  Errors from the
 * individual property setters accumulate in @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    /* Feature words are copied directly rather than via properties */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        /* x2apic is not useful without an in-kernel irqchip */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Guests always see the hypervisor bit, whatever the model says */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        /* Host-passthrough accelerators report the host's vendor string */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
3908
3909/* Return a QDict containing keys for all properties that can be included
3910 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3911 * must be included in the dictionary.
3912 */
3913static QDict *x86_cpu_static_props(void)
3914{
3915 FeatureWord w;
3916 int i;
3917 static const char *props[] = {
3918 "min-level",
3919 "min-xlevel",
3920 "family",
3921 "model",
3922 "stepping",
3923 "model-id",
3924 "vendor",
3925 "lmce",
3926 NULL,
3927 };
3928 static QDict *d;
3929
3930 if (d) {
3931 return d;
3932 }
3933
3934 d = qdict_new();
3935 for (i = 0; props[i]; i++) {
3936 qdict_put_null(d, props[i]);
3937 }
3938
3939 for (w = 0; w < FEATURE_WORDS; w++) {
3940 FeatureWordInfo *fi = &feature_word_info[w];
3941 int bit;
3942 for (bit = 0; bit < 32; bit++) {
3943 if (!fi->feat_names[bit]) {
3944 continue;
3945 }
3946 qdict_put_null(d, fi->feat_names[bit]);
3947 }
3948 }
3949
3950 return d;
3951}
3952
3953/* Add an entry to @props dict, with the value for property. */
3954static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3955{
3956 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3957 &error_abort);
3958
3959 qdict_put_obj(props, prop, value);
3960}
3961
3962/* Convert CPU model data from X86CPU object to a property dictionary
3963 * that can recreate exactly the same CPU model.
3964 */
3965static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3966{
3967 QDict *sprops = x86_cpu_static_props();
3968 const QDictEntry *e;
3969
3970 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3971 const char *prop = qdict_entry_key(e);
3972 x86_cpu_expand_prop(cpu, props, prop);
3973 }
3974}
3975
3976/* Convert CPU model data from X86CPU object to a property dictionary
3977 * that can recreate exactly the same CPU model, including every
3978 * writeable QOM property.
3979 */
3980static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3981{
3982 ObjectPropertyIterator iter;
3983 ObjectProperty *prop;
3984
3985 object_property_iter_init(&iter, OBJECT(cpu));
3986 while ((prop = object_property_iter_next(&iter))) {
3987 /* skip read-only or write-only properties */
3988 if (!prop->get || !prop->set) {
3989 continue;
3990 }
3991
3992 /* "hotplugged" is the only property that is configurable
3993 * on the command-line but will be set differently on CPUs
3994 * created using "-cpu ... -smp ..." and by CPUs created
3995 * on the fly by x86_cpu_from_model() for querying. Skip it.
3996 */
3997 if (!strcmp(prop->name, "hotplugged")) {
3998 continue;
3999 }
4000 x86_cpu_expand_prop(cpu, props, prop->name);
4001 }
4002}
4003
4004static void object_apply_props(Object *obj, QDict *props, Error **errp)
4005{
4006 const QDictEntry *prop;
4007 Error *err = NULL;
4008
4009 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
4010 object_property_set_qobject(obj, qdict_entry_value(prop),
4011 qdict_entry_key(prop), &err);
4012 if (err) {
4013 break;
4014 }
4015 }
4016
4017 error_propagate(errp, err);
4018}
4019
4020/* Create X86CPU object according to model+props specification */
4021static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
4022{
4023 X86CPU *xc = NULL;
4024 X86CPUClass *xcc;
4025 Error *err = NULL;
4026
4027 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
4028 if (xcc == NULL) {
4029 error_setg(&err, "CPU model '%s' not found", model);
4030 goto out;
4031 }
4032
4033 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
4034 if (props) {
4035 object_apply_props(OBJECT(xc), props, &err);
4036 if (err) {
4037 goto out;
4038 }
4039 }
4040
4041 x86_cpu_expand_features(xc, &err);
4042 if (err) {
4043 goto out;
4044 }
4045
4046out:
4047 if (err) {
4048 error_propagate(errp, err);
4049 object_unref(OBJECT(xc));
4050 xc = NULL;
4051 }
4052 return xc;
4053}
4054
4055CpuModelExpansionInfo *
4056arch_query_cpu_model_expansion(CpuModelExpansionType type,
4057 CpuModelInfo *model,
4058 Error **errp)
4059{
4060 X86CPU *xc = NULL;
4061 Error *err = NULL;
4062 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
4063 QDict *props = NULL;
4064 const char *base_name;
4065
4066 xc = x86_cpu_from_model(model->name,
4067 model->has_props ?
4068 qobject_to(QDict, model->props) :
4069 NULL, &err);
4070 if (err) {
4071 goto out;
4072 }
4073
4074 props = qdict_new();
4075 ret->model = g_new0(CpuModelInfo, 1);
4076 ret->model->props = QOBJECT(props);
4077 ret->model->has_props = true;
4078
4079 switch (type) {
4080 case CPU_MODEL_EXPANSION_TYPE_STATIC:
4081 /* Static expansion will be based on "base" only */
4082 base_name = "base";
4083 x86_cpu_to_dict(xc, props);
4084 break;
4085 case CPU_MODEL_EXPANSION_TYPE_FULL:
4086 /* As we don't return every single property, full expansion needs
4087 * to keep the original model name+props, and add extra
4088 * properties on top of that.
4089 */
4090 base_name = model->name;
4091 x86_cpu_to_dict_full(xc, props);
4092 break;
4093 default:
4094 error_setg(&err, "Unsupported expansion type");
4095 goto out;
4096 }
4097
4098 x86_cpu_to_dict(xc, props);
4099
4100 ret->model->name = g_strdup(base_name);
4101
4102out:
4103 object_unref(OBJECT(xc));
4104 if (err) {
4105 error_propagate(errp, err);
4106 qapi_free_CpuModelExpansionInfo(ret);
4107 ret = NULL;
4108 }
4109 return ret;
4110}
4111
4112static gchar *x86_gdb_arch_name(CPUState *cs)
4113{
4114#ifdef TARGET_X86_64
4115 return g_strdup("i386:x86-64");
4116#else
4117 return g_strdup("i386");
4118#endif
4119}
4120
4121static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
4122{
4123 X86CPUDefinition *cpudef = data;
4124 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4125
4126 xcc->cpu_def = cpudef;
4127 xcc->migration_safe = true;
4128}
4129
4130static void x86_register_cpudef_type(X86CPUDefinition *def)
4131{
4132 char *typename = x86_cpu_type_name(def->name);
4133 TypeInfo ti = {
4134 .name = typename,
4135 .parent = TYPE_X86_CPU,
4136 .class_init = x86_cpu_cpudef_class_init,
4137 .class_data = def,
4138 };
4139
4140 /* AMD aliases are handled at runtime based on CPUID vendor, so
4141 * they shouldn't be set on the CPU model table.
4142 */
4143 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
4144 /* catch mistakes instead of silently truncating model_id when too long */
4145 assert(def->model_id && strlen(def->model_id) <= 48);
4146
4147
4148 type_register(&ti);
4149 g_free(typename);
4150}
4151
4152#if !defined(CONFIG_USER_ONLY)
4153
/* Strip the APIC CPUID feature bit from @env; used by boards/machines
 * that do not provide a local APIC.
 */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
4158
4159#endif /* !CONFIG_USER_ONLY */
4160
/* Emulate the CPUID instruction for the guest: fill *eax/*ebx/*ecx/*edx
 * for leaf @index, subleaf @count, based on the configured CPU model in
 * @env.  Out-of-range leaves are clamped per Intel SDM semantics.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges:
     * Centaur (0xC...), extended (0x8...), hypervisor (0x4...), basic.
     */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string + maximum basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, APIC ID, feature flags */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE reflects the current CR4 state, not the model */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << pkg_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* MONITOR/MWAIT Leaf */
        *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
        *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
        *ecx = cpu->mwait.ecx; /* flags */
        *edx = cpu->mwait.edx; /* mwait substates */
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE reflects the current CR4.PKE state */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = xsave_area_size(env->xcr0);
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset of each enabled XSAVE area */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Maximum extended leaf + vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* 48-byte processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        /* Advanced Power Management Information */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        /* SVM information, only when SVM is advertised */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        /* AMD cache topology, per cache level in @count */
        *eax = 0;
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        /* AMD processor topology (core/node IDs) */
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(cs, cpu,
                                  eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        /* Maximum Centaur (VIA) leaf */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        /* AMD Secure Encrypted Virtualization (SEV) capabilities */
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
4600
/* CPUClass::reset(): bring the vCPU to its architectural power-on/RESET
 * state (segment bases, control registers, FPU/XSAVE state, MSRs, MTRRs),
 * then hand off to the accelerator-specific reset hook.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero everything up to (but excluding) the fields that must survive
     * reset (see end_reset_fields in CPUX86State).
     */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 reset value: CD/NW set, ET set, PE/PG clear (real mode) */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS starts at the architectural reset vector base 0xffff0000 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_injected = -1;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Application processors start halted, waiting for SIPI */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}
4729
4730#ifndef CONFIG_USER_ONLY
4731bool cpu_is_bsp(X86CPU *cpu)
4732{
4733 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4734}
4735
/* Machine-reset callback: reset this CPU when the whole machine resets.
 * TODO: remove me, when reset over QOM tree is implemented
 */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
4742#endif
4743
4744static void mce_init(X86CPU *cpu)
4745{
4746 CPUX86State *cenv = &cpu->env;
4747 unsigned int bank;
4748
4749 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4750 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4751 (CPUID_MCE | CPUID_MCA)) {
4752 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4753 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4754 cenv->mcg_ctl = ~(uint64_t)0;
4755 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4756 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4757 }
4758 }
4759}
4760
4761#ifndef CONFIG_USER_ONLY
4762APICCommonClass *apic_get_class(void)
4763{
4764 const char *apic_type = "apic";
4765
4766 /* TODO: in-kernel irqchip for hvf */
4767 if (kvm_apic_in_kernel()) {
4768 apic_type = "kvm-apic";
4769 } else if (xen_enabled()) {
4770 apic_type = "xen-apic";
4771 }
4772
4773 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4774}
4775
4776static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
4777{
4778 APICCommonState *apic;
4779 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
4780
4781 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
4782
4783 object_property_add_child(OBJECT(cpu), "lapic",
4784 OBJECT(cpu->apic_state), &error_abort);
4785 object_unref(OBJECT(cpu->apic_state));
4786
4787 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
4788 /* TODO: convert to link<> */
4789 apic = APIC_COMMON(cpu->apic_state);
4790 apic->cpu = cpu;
4791 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
4792}
4793
/* Realize the CPU's local APIC (if one was created in
 * x86_cpu_apic_create()) and map the APIC MMIO window into system
 * memory.  The MMIO region is shared, so it is mapped only once, when
 * the first CPU is realized.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    /* NOTE(review): errp is not inspected here before continuing to the
     * MMIO mapping below — presumably realize failure is reported by the
     * caller; confirm callers check errp before using the APIC.
     */
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
4816
/* machine-init-done notifier: if the machine exposes a "/machine/smram"
 * region, alias its low 4GiB into this CPU's SMM address space root with
 * priority 1, so SMRAM overlays normal memory while the CPU is in SMM.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
4831#else
/* User-mode emulation has no local APIC device: nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
4835#endif
4836
4837/* Note: Only safe for use on x86(-64) hosts */
4838static uint32_t x86_host_phys_bits(void)
4839{
4840 uint32_t eax;
4841 uint32_t host_phys_bits;
4842
4843 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4844 if (eax >= 0x80000008) {
4845 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4846 /* Note: According to AMD doc 25481 rev 2.34 they have a field
4847 * at 23:16 that can specify a maximum physical address bits for
4848 * the guest that can override this value; but I've not seen
4849 * anything with that set.
4850 */
4851 host_phys_bits = eax & 0xff;
4852 } else {
4853 /* It's an odd 64 bit machine that doesn't have the leaf for
4854 * physical address bits; fall back to 36 that's most older
4855 * Intel.
4856 */
4857 host_phys_bits = 36;
4858 }
4859
4860 return host_phys_bits;
4861}
4862
4863static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4864{
4865 if (*min < value) {
4866 *min = value;
4867 }
4868}
4869
4870/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4871static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4872{
4873 CPUX86State *env = &cpu->env;
4874 FeatureWordInfo *fi = &feature_word_info[w];
4875 uint32_t eax = fi->cpuid.eax;
4876 uint32_t region = eax & 0xF0000000;
4877
4878 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
4879 if (!env->features[w]) {
4880 return;
4881 }
4882
4883 switch (region) {
4884 case 0x00000000:
4885 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4886 break;
4887 case 0x80000000:
4888 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4889 break;
4890 case 0xC0000000:
4891 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4892 break;
4893 }
4894}
4895
4896/* Calculate XSAVE components based on the configured CPU feature flags */
4897static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4898{
4899 CPUX86State *env = &cpu->env;
4900 int i;
4901 uint64_t mask;
4902
4903 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4904 return;
4905 }
4906
4907 mask = 0;
4908 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4909 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4910 if (env->features[esa->feature] & esa->bits) {
4911 mask |= (1ULL << i);
4912 }
4913 }
4914
4915 env->features[FEAT_XSAVE_COMP_LO] = mask;
4916 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4917}
4918
4919/***** Steps involved on loading and filtering CPUID data
4920 *
4921 * When initializing and realizing a CPU object, the steps
4922 * involved in setting up CPUID data are:
4923 *
4924 * 1) Loading CPU model definition (X86CPUDefinition). This is
4925 * implemented by x86_cpu_load_def() and should be completely
4926 * transparent, as it is done automatically by instance_init.
4927 * No code should need to look at X86CPUDefinition structs
4928 * outside instance_init.
4929 *
4930 * 2) CPU expansion. This is done by realize before CPUID
4931 * filtering, and will make sure host/accelerator data is
4932 * loaded for CPU models that depend on host capabilities
4933 * (e.g. "host"). Done by x86_cpu_expand_features().
4934 *
4935 * 3) CPUID filtering. This initializes extra data related to
4936 * CPUID, and checks if the host supports all capabilities
4937 * required by the CPU. Runnability of a CPU model is
4938 * determined at this step. Done by x86_cpu_filter_features().
4939 *
4940 * Some operations don't require all steps to be performed.
4941 * More precisely:
4942 *
4943 * - CPU instance creation (instance_init) will run only CPU
4944 * model loading. CPU expansion can't run at instance_init-time
4945 * because host/accelerator data may be not available yet.
4946 * - CPU realization will perform both CPU model expansion and CPUID
4947 * filtering, and return an error in case one of them fails.
4948 * - query-cpu-definitions needs to run all 3 steps. It needs
4949 * to run CPUID filtering, as the 'unavailable-features'
4950 * field is set based on the filtering results.
4951 * - The query-cpu-model-expansion QMP command only needs to run
4952 * CPU model loading and CPU expansion. It should not filter
4953 * any CPUID data based on host capabilities.
4954 */
4955
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 *
 * This is step 2 of the CPUID setup pipeline described above: it must
 * run before x86_cpu_filter_features(), and does no host-based
 * filtering itself.  On failure, sets *errp and returns early.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w] & \
                ~feature_word_info[w].no_autoenable_flags;
        }
    }

    /* Apply legacy "+feature" command-line flags as property writes. */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* ... and legacy "-feature" flags likewise. */
    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features are only exposed under KVM with kvm=on. */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }

        /* SEV requires CPUID[0x8000001F] */
        if (sev_enabled()) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
5046
5047/*
5048 * Finishes initialization of CPUID data, filters CPU feature
5049 * words based on host availability of each feature.
5050 *
5051 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
5052 */
5053static int x86_cpu_filter_features(X86CPU *cpu)
5054{
5055 CPUX86State *env = &cpu->env;
5056 FeatureWord w;
5057 int rv = 0;
5058
5059 for (w = 0; w < FEATURE_WORDS; w++) {
5060 uint32_t host_feat =
5061 x86_cpu_get_supported_feature_word(w, false);
5062 uint32_t requested_features = env->features[w];
5063 env->features[w] &= host_feat;
5064 cpu->filtered_features[w] = requested_features & ~env->features[w];
5065 if (cpu->filtered_features[w]) {
5066 rv = 1;
5067 }
5068 }
5069
5070 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
5071 kvm_enabled()) {
5072 KVMState *s = CPU(cpu)->kvm_state;
5073 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
5074 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
5075 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
5076 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
5077 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
5078
5079 if (!eax_0 ||
5080 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
5081 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
5082 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
5083 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
5084 INTEL_PT_ADDR_RANGES_NUM) ||
5085 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
5086 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
5087 (ecx_0 & INTEL_PT_IP_LIP)) {
5088 /*
5089 * Processor Trace capabilities aren't configurable, so if the
5090 * host can't emulate the capabilities we report on
5091 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
5092 */
5093 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
5094 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
5095 rv = 1;
5096 }
5097 }
5098
5099 return rv;
5100}
5101
/* Vendor checks: true only when all three CPUID vendor-string registers
 * match the corresponding Intel/AMD vendor ID constants.
 */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass realize for X86CPU: runs CPUID expansion and filtering,
 * validates phys-bits, fills in cache info, creates/realizes the APIC,
 * sets up the SMM address space (TCG), starts the vCPU, and finally
 * chains to the parent class's realize.  Order matters throughout; see
 * inline comments.  On failure, sets *errp and returns.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    /* Models like "host" only make sense with a host-CPUID accelerator. */
    if (xcc->host_cpuid_required) {
        if (!accel_uses_host_cpuid()) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(&local_err, "CPU model '%s' requires KVM", name);
            g_free(name);
            goto out;
        }

        if (enable_cpu_pm) {
            host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
                       &cpu->mwait.ecx, &cpu->mwait.edx);
            env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
        }
    }

    /* mwait extended info: needed for Core compatibility */
    /* We always wake on interrupt even if host does not have the capability */
    cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /* Step 2 of the CPUID pipeline (see the comment block above). */
    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Step 3: filter against host; warn or fail per check/enforce. */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       accel_uses_host_cpuid() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                           " (but is %u)",
                           TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            g_free(name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->cpu_def->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }


    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed whenever the guest sees one or there are SMP CPUs. */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
            warn_report("This family of AMD CPU doesn't support "
                        "hyperthreading(%d)",
                        cs->nr_threads);
            error_printf("Please configure -smp options properly"
                         " or try enabling topoext feature.\n");
            ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
5344
/* DeviceClass unrealize for X86CPU: remove the vCPU and its reset
 * handler, dispose of the APIC child device, then chain to the parent
 * class's unrealize.
 */
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
5367
/* Opaque state for the per-feature-bit QOM boolean properties: the
 * feature word and the bit mask (possibly several bits) it controls.
 */
typedef struct BitProperty {
    FeatureWord w;
    uint32_t mask;
} BitProperty;
5372
5373static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5374 void *opaque, Error **errp)
5375{
5376 X86CPU *cpu = X86_CPU(obj);
5377 BitProperty *fp = opaque;
5378 uint32_t f = cpu->env.features[fp->w];
5379 bool value = (f & fp->mask) == fp->mask;
5380 visit_type_bool(v, name, &value, errp);
5381}
5382
5383static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5384 void *opaque, Error **errp)
5385{
5386 DeviceState *dev = DEVICE(obj);
5387 X86CPU *cpu = X86_CPU(obj);
5388 BitProperty *fp = opaque;
5389 Error *local_err = NULL;
5390 bool value;
5391
5392 if (dev->realized) {
5393 qdev_prop_set_after_realize(dev, name, errp);
5394 return;
5395 }
5396
5397 visit_type_bool(v, name, &value, &local_err);
5398 if (local_err) {
5399 error_propagate(errp, local_err);
5400 return;
5401 }
5402
5403 if (value) {
5404 cpu->env.features[fp->w] |= fp->mask;
5405 } else {
5406 cpu->env.features[fp->w] &= ~fp->mask;
5407 }
5408 cpu->env.user_features[fp->w] |= fp->mask;
5409}
5410
5411static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5412 void *opaque)
5413{
5414 BitProperty *prop = opaque;
5415 g_free(prop);
5416}
5417
5418/* Register a boolean property to get/set a single bit in a uint32_t field.
5419 *
5420 * The same property name can be registered multiple times to make it affect
5421 * multiple bits in the same FeatureWord. In that case, the getter will return
5422 * true only if all bits are set.
5423 */
5424static void x86_cpu_register_bit_prop(X86CPU *cpu,
5425 const char *prop_name,
5426 FeatureWord w,
5427 int bitnr)
5428{
5429 BitProperty *fp;
5430 ObjectProperty *op;
5431 uint32_t mask = (1UL << bitnr);
5432
5433 op = object_property_find(OBJECT(cpu), prop_name, NULL);
5434 if (op) {
5435 fp = op->opaque;
5436 assert(fp->w == w);
5437 fp->mask |= mask;
5438 } else {
5439 fp = g_new0(BitProperty, 1);
5440 fp->w = w;
5441 fp->mask = mask;
5442 object_property_add(OBJECT(cpu), prop_name, "bool",
5443 x86_cpu_get_bit_prop,
5444 x86_cpu_set_bit_prop,
5445 x86_cpu_release_bit_prop, fp, &error_abort);
5446 }
5447}
5448
5449static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5450 FeatureWord w,
5451 int bitnr)
5452{
5453 FeatureWordInfo *fi = &feature_word_info[w];
5454 const char *name = fi->feat_names[bitnr];
5455
5456 if (!name) {
5457 return;
5458 }
5459
5460 /* Property names should use "-" instead of "_".
5461 * Old names containing underscores are registered as aliases
5462 * using object_property_add_alias()
5463 */
5464 assert(!strchr(name, '_'));
5465 /* aliases don't use "|" delimiters anymore, they are registered
5466 * manually using object_property_add_alias() */
5467 assert(!strchr(name, '|'));
5468 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5469}
5470
5471static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5472{
5473 X86CPU *cpu = X86_CPU(cs);
5474 CPUX86State *env = &cpu->env;
5475 GuestPanicInformation *panic_info = NULL;
5476
5477 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5478 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5479
5480 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
5481
5482 assert(HV_CRASH_PARAMS >= 5);
5483 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5484 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5485 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5486 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5487 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5488 }
5489
5490 return panic_info;
5491}
5492static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5493 const char *name, void *opaque,
5494 Error **errp)
5495{
5496 CPUState *cs = CPU(obj);
5497 GuestPanicInformation *panic_info;
5498
5499 if (!cs->crash_occurred) {
5500 error_setg(errp, "No crash occured");
5501 return;
5502 }
5503
5504 panic_info = x86_cpu_get_crash_info(cs);
5505 if (panic_info == NULL) {
5506 error_setg(errp, "No crash information");
5507 return;
5508 }
5509
5510 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5511 errp);
5512 qapi_free_GuestPanicInformation(panic_info);
5513}
5514
/* QOM instance_init for X86CPU: registers the version/vendor/model-id
 * properties, feature-word introspection, the crash-information getter,
 * one boolean property per named feature bit, legacy alias spellings,
 * and finally loads the class's CPU model definition into the instance.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL)
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only introspection of the enabled and filtered feature words. */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Alternate spellings kept for compatibility. */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Legacy underscore spellings of the canonical dash names. */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
5597
5598static int64_t x86_cpu_get_arch_id(CPUState *cs)
5599{
5600 X86CPU *cpu = X86_CPU(cs);
5601
5602 return cpu->apic_id;
5603}
5604
5605static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5606{
5607 X86CPU *cpu = X86_CPU(cs);
5608
5609 return cpu->env.cr[0] & CR0_PG_MASK;
5610}
5611
5612static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5613{
5614 X86CPU *cpu = X86_CPU(cs);
5615
5616 cpu->env.eip = value;
5617}
5618
5619static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5620{
5621 X86CPU *cpu = X86_CPU(cs);
5622
5623 cpu->env.eip = tb->pc - tb->cs_base;
5624}
5625
/* Return the highest-priority interrupt type from @interrupt_request
 * that the CPU can take in its current state, or 0 if none is
 * deliverable.  Priority order: POLL, SIPI, then — only with the global
 * interrupt flag (GIF) set — SMI, NMI, MCE, HARD, VIRQ.  SMI is blocked
 * inside SMM, NMI while a previous NMI is handled, and HARD/VIRQ obey
 * EFLAGS.IF, the interrupt-shadow, and the SVM V_INTR controls.
 */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}
5667
5668static bool x86_cpu_has_work(CPUState *cs)
5669{
5670 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
5671}
5672
5673static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5674{
5675 X86CPU *cpu = X86_CPU(cs);
5676 CPUX86State *env = &cpu->env;
5677
5678 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5679 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5680 : bfd_mach_i386_i8086);
5681 info->print_insn = print_insn_i386;
5682
5683 info->cap_arch = CS_ARCH_X86;
5684 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5685 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5686 : CS_MODE_16);
5687 info->cap_insn_unit = 1;
5688 info->cap_insn_split = 8;
5689}
5690
/* Recompute env->hflags from the current segment registers, control
 * registers, EFLAGS and EFER.  Only the bits excluded by
 * HFLAG_COPY_MASK are rederived; all other hflags bits are preserved
 * from the previous value.
 */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    /* Start from the preserved bits, then rebuild the derived ones. */
    hflags = env->hflags & HFLAG_COPY_MASK;
    /* CPL comes from the DPL field of the SS descriptor flags. */
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        /* 64-bit code segment: force 32-bit CS/SS semantics plus CS64. */
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}
5732
/*
 * qdev properties shared by every X86CPU subclass; installed as
 * dc->props in x86_cpu_common_class_init().
 */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* System emulation: topology IDs start unassigned and are set later */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments exposed to Windows guests (all off by default) */
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
    DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
    DEFINE_PROP_BOOL("hv-tlbflush", X86CPU, hyperv_tlbflush, false),
    DEFINE_PROP_BOOL("hv-evmcs", X86CPU, hyperv_evmcs, false),
    DEFINE_PROP_BOOL("hv-ipi", X86CPU, hyperv_ipi, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* CPUID level overrides; UINT32_MAX means "not set by the user" */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
                     true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_END_OF_LIST()
};
5809
/*
 * Class init shared by every X86CPU type: wires up the qdev hooks
 * (realize/unrealize, properties, reset) and the CPUClass callbacks
 * (interrupt handling, gdb access, ELF notes, paging/debug helpers).
 * Several callbacks are conditional on CONFIG_TCG / CONFIG_USER_ONLY.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    /* Save the parent reset handler before overriding it. */
    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* System emulation only: memory mapping, crash notes, migration state */
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    /* Register count must match the corresponding gdb XML description. */
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 57;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 41;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
#endif
    cc->disas_set_info = x86_disas_set_info;

    dc->user_creatable = true;
}
5873
/* Abstract QOM base type from which every concrete x86 CPU model derives. */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
5883
5884
5885/* "base" CPU model, used by query-cpu-model-expansion */
5886static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
5887{
5888 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5889
5890 xcc->static_model = true;
5891 xcc->migration_safe = true;
5892 xcc->model_description = "base CPU model type with no features enabled";
5893 xcc->ordering = 8;
5894}
5895
/* QOM registration info for the "base" CPU model. */
static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};
5901
5902static void x86_cpu_register_types(void)
5903{
5904 int i;
5905
5906 type_register_static(&x86_cpu_type_info);
5907 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5908 x86_register_cpudef_type(&builtin_x86_defs[i]);
5909 }
5910 type_register_static(&max_x86_cpu_type_info);
5911 type_register_static(&x86_base_cpu_type_info);
5912#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5913 type_register_static(&host_x86_cpu_type_info);
5914#endif
5915}
5916
5917type_init(x86_cpu_register_types)