1/*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "qemu/osdep.h"
21#include "qemu/units.h"
22#include "qemu/cutils.h"
23#include "qemu/bitops.h"
24
25#include "cpu.h"
26#include "exec/exec-all.h"
27#include "sysemu/kvm.h"
28#include "sysemu/hvf.h"
29#include "sysemu/cpus.h"
30#include "kvm_i386.h"
31#include "sev_i386.h"
32
33#include "qemu/error-report.h"
34#include "qemu/option.h"
35#include "qemu/config-file.h"
36#include "qapi/error.h"
37#include "qapi/qapi-visit-misc.h"
38#include "qapi/qapi-visit-run-state.h"
39#include "qapi/qmp/qdict.h"
40#include "qapi/qmp/qerror.h"
41#include "qapi/visitor.h"
42#include "qom/qom-qobject.h"
43#include "sysemu/arch_init.h"
44
45#include "standard-headers/asm-x86/kvm_para.h"
46
47#include "sysemu/sysemu.h"
48#include "hw/qdev-properties.h"
49#include "hw/i386/topology.h"
50#ifndef CONFIG_USER_ONLY
51#include "exec/address-spaces.h"
52#include "hw/hw.h"
53#include "hw/xen/xen.h"
54#include "hw/i386/apic_internal.h"
55#endif
56
57#include "disas/capstone.h"
58
59/* Helpers for building CPUID[2] descriptors: */
60
61struct CPUID2CacheDescriptorInfo {
62 enum CacheType type;
63 int level;
64 int size;
65 int line_size;
66 int associativity;
67};
68
69/*
70 * Known CPUID 2 cache descriptors.
71 * From Intel SDM Volume 2A, CPUID instruction
72 */
73struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
74 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
75 .associativity = 4, .line_size = 32, },
76 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
77 .associativity = 4, .line_size = 32, },
78 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
79 .associativity = 4, .line_size = 64, },
80 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
81 .associativity = 2, .line_size = 32, },
82 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
83 .associativity = 4, .line_size = 32, },
84 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
85 .associativity = 4, .line_size = 64, },
86 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
87 .associativity = 6, .line_size = 64, },
88 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
89 .associativity = 2, .line_size = 64, },
90 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
91 .associativity = 8, .line_size = 64, },
    92 /* lines per sector is not supported by cpuid2_cache_descriptor(),
93 * so descriptors 0x22, 0x23 are not included
94 */
95 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
96 .associativity = 16, .line_size = 64, },
    97 /* lines per sector is not supported by cpuid2_cache_descriptor(),
98 * so descriptors 0x25, 0x20 are not included
99 */
100 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
101 .associativity = 8, .line_size = 64, },
102 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
103 .associativity = 8, .line_size = 64, },
104 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
105 .associativity = 4, .line_size = 32, },
106 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
107 .associativity = 4, .line_size = 32, },
108 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
109 .associativity = 4, .line_size = 32, },
110 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
111 .associativity = 4, .line_size = 32, },
112 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
113 .associativity = 4, .line_size = 32, },
114 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
115 .associativity = 4, .line_size = 64, },
116 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
117 .associativity = 8, .line_size = 64, },
118 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
119 .associativity = 12, .line_size = 64, },
120 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
121 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
122 .associativity = 12, .line_size = 64, },
123 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
124 .associativity = 16, .line_size = 64, },
125 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
126 .associativity = 12, .line_size = 64, },
127 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
128 .associativity = 16, .line_size = 64, },
129 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
130 .associativity = 24, .line_size = 64, },
131 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
132 .associativity = 8, .line_size = 64, },
133 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
134 .associativity = 4, .line_size = 64, },
135 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
136 .associativity = 4, .line_size = 64, },
137 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
138 .associativity = 4, .line_size = 64, },
139 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
140 .associativity = 4, .line_size = 64, },
    141 /* lines per sector is not supported by cpuid2_cache_descriptor(),
142 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
143 */
144 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
145 .associativity = 8, .line_size = 64, },
146 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
147 .associativity = 2, .line_size = 64, },
148 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
149 .associativity = 8, .line_size = 64, },
150 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
151 .associativity = 8, .line_size = 32, },
152 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
153 .associativity = 8, .line_size = 32, },
154 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
155 .associativity = 8, .line_size = 32, },
156 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
157 .associativity = 8, .line_size = 32, },
158 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
159 .associativity = 4, .line_size = 64, },
160 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
161 .associativity = 8, .line_size = 64, },
162 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
163 .associativity = 4, .line_size = 64, },
164 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
165 .associativity = 4, .line_size = 64, },
166 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
167 .associativity = 4, .line_size = 64, },
168 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
169 .associativity = 8, .line_size = 64, },
170 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
171 .associativity = 8, .line_size = 64, },
172 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
173 .associativity = 8, .line_size = 64, },
174 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
175 .associativity = 12, .line_size = 64, },
176 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
177 .associativity = 12, .line_size = 64, },
178 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
179 .associativity = 12, .line_size = 64, },
180 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
181 .associativity = 16, .line_size = 64, },
182 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
183 .associativity = 16, .line_size = 64, },
184 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
185 .associativity = 16, .line_size = 64, },
186 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
187 .associativity = 24, .line_size = 64, },
188 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
189 .associativity = 24, .line_size = 64, },
190 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
191 .associativity = 24, .line_size = 64, },
192};
193
194/*
195 * "CPUID leaf 2 does not report cache descriptor information,
196 * use CPUID leaf 4 to query cache parameters"
197 */
198#define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
199
200/*
201 * Return a CPUID 2 cache descriptor for a given cache.
202 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
203 */
204static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
205{
206 int i;
207
208 assert(cache->size > 0);
209 assert(cache->level > 0);
210 assert(cache->line_size > 0);
211 assert(cache->associativity > 0);
212 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
213 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
214 if (d->level == cache->level && d->type == cache->type &&
215 d->size == cache->size && d->line_size == cache->line_size &&
216 d->associativity == cache->associativity) {
217 return i;
218 }
219 }
220
221 return CACHE_DESCRIPTOR_UNAVAILABLE;
222}
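/*
 * Illustrative example (derived from the table above, not from the SDM):
 * a 32 KiB, 8-way, 64-byte-line L1 data cache matches entry 0x2C, so
 * cpuid2_cache_descriptor() returns 0x2C for it.  The 4 MiB, 16-way legacy
 * L2 cache defined below has no matching entry and falls back to
 * CACHE_DESCRIPTOR_UNAVAILABLE (0xFF); see the separate
 * legacy_l2_cache_cpuid2 definition used for CPUID leaf 2.
 */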
223
224/* CPUID Leaf 4 constants: */
225
226/* EAX: */
227#define CACHE_TYPE_D 1
228#define CACHE_TYPE_I 2
229#define CACHE_TYPE_UNIFIED 3
230
231#define CACHE_LEVEL(l) (l << 5)
232
233#define CACHE_SELF_INIT_LEVEL (1 << 8)
234
235/* EDX: */
236#define CACHE_NO_INVD_SHARING (1 << 0)
237#define CACHE_INCLUSIVE (1 << 1)
238#define CACHE_COMPLEX_IDX (1 << 2)
239
240/* Encode CacheType for CPUID[4].EAX */
241#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
242 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
243 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
244 0 /* Invalid value */)
245
246
247/* Encode cache info for CPUID[4] */
248static void encode_cache_cpuid4(CPUCacheInfo *cache,
249 int num_apic_ids, int num_cores,
250 uint32_t *eax, uint32_t *ebx,
251 uint32_t *ecx, uint32_t *edx)
252{
253 assert(cache->size == cache->line_size * cache->associativity *
254 cache->partitions * cache->sets);
255
256 assert(num_apic_ids > 0);
257 *eax = CACHE_TYPE(cache->type) |
258 CACHE_LEVEL(cache->level) |
259 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
260 ((num_cores - 1) << 26) |
261 ((num_apic_ids - 1) << 14);
262
263 assert(cache->line_size > 0);
264 assert(cache->partitions > 0);
265 assert(cache->associativity > 0);
266 /* We don't implement fully-associative caches */
267 assert(cache->associativity < cache->sets);
268 *ebx = (cache->line_size - 1) |
269 ((cache->partitions - 1) << 12) |
270 ((cache->associativity - 1) << 22);
271
272 assert(cache->sets > 0);
273 *ecx = cache->sets - 1;
274
275 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
276 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
277 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
278}
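/*
 * Illustrative example, assuming a single-core, single-thread topology and
 * the legacy_l1d_cache values defined below (64-byte lines, 1 partition,
 * 8-way, 64 sets): EAX = 0x121 (data cache, level 1, self-initializing),
 * EBX = 0x01C0003F, ECX = 0x3F, EDX = 0x1 (no_invd_sharing).
 */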
279
280/* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
281static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
282{
283 assert(cache->size % 1024 == 0);
284 assert(cache->lines_per_tag > 0);
285 assert(cache->associativity > 0);
286 assert(cache->line_size > 0);
287 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
288 (cache->lines_per_tag << 8) | (cache->line_size);
289}
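/*
 * Illustrative example: for the legacy_l1d_cache_amd values defined below
 * (64 KiB, 2-way, 1 line per tag, 64-byte lines) this returns
 * (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140.
 */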
290
291#define ASSOC_FULL 0xFF
292
293/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
294#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
295 a == 2 ? 0x2 : \
296 a == 4 ? 0x4 : \
297 a == 8 ? 0x6 : \
298 a == 16 ? 0x8 : \
299 a == 32 ? 0xA : \
300 a == 48 ? 0xB : \
301 a == 64 ? 0xC : \
302 a == 96 ? 0xD : \
303 a == 128 ? 0xE : \
304 a == ASSOC_FULL ? 0xF : \
305 0 /* invalid value */)
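/*
 * Example encodings: AMD_ENC_ASSOC(2) == 0x2, AMD_ENC_ASSOC(16) == 0x8,
 * AMD_ENC_ASSOC(ASSOC_FULL) == 0xF; any unlisted associativity (e.g. 6)
 * maps to the invalid value 0.
 */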
306
307/*
308 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
309 * @l3 can be NULL.
310 */
311static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
312 CPUCacheInfo *l3,
313 uint32_t *ecx, uint32_t *edx)
314{
315 assert(l2->size % 1024 == 0);
316 assert(l2->associativity > 0);
317 assert(l2->lines_per_tag > 0);
318 assert(l2->line_size > 0);
319 *ecx = ((l2->size / 1024) << 16) |
320 (AMD_ENC_ASSOC(l2->associativity) << 12) |
321 (l2->lines_per_tag << 8) | (l2->line_size);
322
323 if (l3) {
324 assert(l3->size % (512 * 1024) == 0);
325 assert(l3->associativity > 0);
326 assert(l3->lines_per_tag > 0);
327 assert(l3->line_size > 0);
328 *edx = ((l3->size / (512 * 1024)) << 18) |
329 (AMD_ENC_ASSOC(l3->associativity) << 12) |
330 (l3->lines_per_tag << 8) | (l3->line_size);
331 } else {
332 *edx = 0;
333 }
334}
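/*
 * Illustrative example using the legacy AMD caches defined below: the
 * 512 KiB, 16-way L2 encodes as
 * ECX = (512 << 16) | (0x8 << 12) | (1 << 8) | 64 = 0x02008140, and the
 * 16 MiB, 16-way legacy L3 encodes as
 * EDX = (32 << 18) | (0x8 << 12) | (1 << 8) | 64 = 0x00808140.
 */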
335
336/*
337 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
338 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
 339 * Define the constants used to build the CPU topology. Right now, the
 340 * TOPOEXT feature is enabled only on EPYC, so these constants are based on
 341 * EPYC-supported configurations. We may need to handle other cases if
 342 * these values change in the future.
343 */
344/* Maximum core complexes in a node */
345#define MAX_CCX 2
346/* Maximum cores in a core complex */
347#define MAX_CORES_IN_CCX 4
348/* Maximum cores in a node */
349#define MAX_CORES_IN_NODE 8
350/* Maximum nodes in a socket */
351#define MAX_NODES_PER_SOCKET 4
352
353/*
354 * Figure out the number of nodes required to build this config.
355 * Max cores in a node is 8
356 */
357static int nodes_in_socket(int nr_cores)
358{
359 int nodes;
360
361 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
362
 363    /* Hardware does not support a config with 3 nodes; return 4 in that case */
364 return (nodes == 3) ? 4 : nodes;
365}
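/*
 * For example: 8 cores -> 1 node, 12 cores -> 2 nodes; 24 cores would
 * compute to 3 nodes and is therefore reported as 4.
 */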
366
367/*
 368 * Decide the number of cores in a core complex for the given nr_cores, using
 369 * the constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
 370 * MAX_NODES_PER_SOCKET, and maintaining symmetry as much as possible.
 371 * The L3 cache is shared across all cores in a core complex, so this also
 372 * tells us how many cores share the L3 cache.
373 */
374static int cores_in_core_complex(int nr_cores)
375{
376 int nodes;
377
378 /* Check if we can fit all the cores in one core complex */
379 if (nr_cores <= MAX_CORES_IN_CCX) {
380 return nr_cores;
381 }
382 /* Get the number of nodes required to build this config */
383 nodes = nodes_in_socket(nr_cores);
384
385 /*
 386     * Divide the cores across all the core complexes;
 387     * return the rounded-up value.
388 */
389 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
390}
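/*
 * For example: 4 cores fit in a single core complex; 12 cores need 2 nodes,
 * giving DIV_ROUND_UP(12, 2 * MAX_CCX) = 3 cores per core complex; 16 cores
 * give 4 cores per core complex.
 */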
391
392/* Encode cache info for CPUID[8000001D] */
393static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
394 uint32_t *eax, uint32_t *ebx,
395 uint32_t *ecx, uint32_t *edx)
396{
397 uint32_t l3_cores;
398 assert(cache->size == cache->line_size * cache->associativity *
399 cache->partitions * cache->sets);
400
401 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
402 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
403
404 /* L3 is shared among multiple cores */
405 if (cache->level == 3) {
406 l3_cores = cores_in_core_complex(cs->nr_cores);
407 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
408 } else {
409 *eax |= ((cs->nr_threads - 1) << 14);
410 }
411
412 assert(cache->line_size > 0);
413 assert(cache->partitions > 0);
414 assert(cache->associativity > 0);
415 /* We don't implement fully-associative caches */
416 assert(cache->associativity < cache->sets);
417 *ebx = (cache->line_size - 1) |
418 ((cache->partitions - 1) << 12) |
419 ((cache->associativity - 1) << 22);
420
421 assert(cache->sets > 0);
422 *ecx = cache->sets - 1;
423
424 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
425 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
426 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
427}
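/*
 * Illustrative example: with cs->nr_cores = 16 and cs->nr_threads = 2, an
 * L3 cache is reported as shared by cores_in_core_complex(16) * 2 = 8
 * logical processors (bits 25:14 of EAX = 7), while L1/L2 caches are shared
 * only by the 2 threads of a core (bits 25:14 = 1).
 */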
428
429/* Data structure to hold the configuration info for a given core index */
430struct core_topology {
431 /* core complex id of the current core index */
432 int ccx_id;
433 /*
434 * Adjusted core index for this core in the topology
435 * This can be 0,1,2,3 with max 4 cores in a core complex
436 */
437 int core_id;
438 /* Node id for this core index */
439 int node_id;
440 /* Number of nodes in this config */
441 int num_nodes;
442};
443
444/*
 445 * Build a configuration that closely matches the EPYC hardware, using the
 446 * EPYC hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX,
 447 * MAX_CORES_IN_NODE) for now. This could change in the future.
448 * nr_cores : Total number of cores in the config
449 * core_id : Core index of the current CPU
450 * topo : Data structure to hold all the config info for this core index
451 */
452static void build_core_topology(int nr_cores, int core_id,
453 struct core_topology *topo)
454{
455 int nodes, cores_in_ccx;
456
457 /* First get the number of nodes required */
458 nodes = nodes_in_socket(nr_cores);
459
460 cores_in_ccx = cores_in_core_complex(nr_cores);
461
462 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
463 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
464 topo->core_id = core_id % cores_in_ccx;
465 topo->num_nodes = nodes;
466}
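/*
 * Illustrative example: with nr_cores = 16 and core_id = 11, there are
 * 2 nodes and 4 cores per core complex, so node_id = 11 / 8 = 1,
 * ccx_id = (11 % 8) / 4 = 0 and core_id = 11 % 4 = 3.
 */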
467
 468/* Encode topology info for CPUID[8000001E] */
469static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
470 uint32_t *eax, uint32_t *ebx,
471 uint32_t *ecx, uint32_t *edx)
472{
473 struct core_topology topo = {0};
474 unsigned long nodes;
475 int shift;
476
477 build_core_topology(cs->nr_cores, cpu->core_id, &topo);
478 *eax = cpu->apic_id;
479 /*
 480     * CPUID_Fn8000001E_EBX
 481     * 31:16 Reserved
 482     * 15:8  Threads per core (the number of threads per core is
 483     *       this field + 1)
 484     *  7:0  Core id (see bit decoding below)
 485     *       SMT:
 486     *           4:3 node id
 487     *             2 Core complex id
 488     *           1:0 Core id
 489     *       Non SMT:
 490     *           5:4 node id
 491     *             3 Core complex id
 492     *           1:0 Core id
493 */
494 if (cs->nr_threads - 1) {
495 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
496 (topo.ccx_id << 2) | topo.core_id;
497 } else {
498 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
499 }
500 /*
501 * CPUID_Fn8000001E_ECX
502 * 31:11 Reserved
 503     * 10:8  Nodes per processor (the number of nodes is this field + 1)
504 * 7:0 Node id (see bit decoding below)
505 * 2 Socket id
506 * 1:0 Node id
507 */
508 if (topo.num_nodes <= 4) {
509 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
510 topo.node_id;
511 } else {
512 /*
 513         * Node id fix-up. Actual hardware supports up to 4 nodes, but with
 514         * more than 32 cores we may end up with more than 4 nodes.
 515         * The node id here is a combination of socket id and node id. The only
 516         * requirement is that this number should be unique across the system.
 517         * Shift the socket id to accommodate more nodes. We don't expect both
 518         * socket id and node id to be large numbers at the same time. This is
 519         * not an ideal config, but we need to support it. The maximum number of
 520         * nodes we can have is 32 (255/8) with 8 cores per node and 255 max
 521         * cores, so we only need 5 bits for nodes. Find the leftmost set bit to
 522         * represent the total number of nodes; find_last_bit returns the last
 523         * set bit (0-based). Left-shift (+1) the socket id to cover all nodes.
524 */
525 nodes = topo.num_nodes - 1;
526 shift = find_last_bit(&nodes, 8);
527 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
528 topo.node_id;
529 }
530 *edx = 0;
531}
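/*
 * Illustrative example, continuing the topology example above (nr_cores = 16,
 * core_id = 11, nr_threads = 2, socket_id = 0): the SMT path yields
 * EBX = (1 << 8) | (1 << 3) | (0 << 2) | 3 = 0x10B and, with
 * num_nodes = 2 <= 4, ECX = (1 << 8) | (0 << 2) | 1 = 0x101.
 */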
532
533/*
534 * Definitions of the hardcoded cache entries we expose:
535 * These are legacy cache values. If there is a need to change any
 536 * of these values, please use builtin_x86_defs.
537 */
538
539/* L1 data cache: */
540static CPUCacheInfo legacy_l1d_cache = {
541 .type = DATA_CACHE,
542 .level = 1,
543 .size = 32 * KiB,
544 .self_init = 1,
545 .line_size = 64,
546 .associativity = 8,
547 .sets = 64,
548 .partitions = 1,
549 .no_invd_sharing = true,
550};
551
552/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
553static CPUCacheInfo legacy_l1d_cache_amd = {
554 .type = DATA_CACHE,
555 .level = 1,
556 .size = 64 * KiB,
557 .self_init = 1,
558 .line_size = 64,
559 .associativity = 2,
560 .sets = 512,
561 .partitions = 1,
562 .lines_per_tag = 1,
563 .no_invd_sharing = true,
564};
565
566/* L1 instruction cache: */
567static CPUCacheInfo legacy_l1i_cache = {
568 .type = INSTRUCTION_CACHE,
569 .level = 1,
570 .size = 32 * KiB,
571 .self_init = 1,
572 .line_size = 64,
573 .associativity = 8,
574 .sets = 64,
575 .partitions = 1,
576 .no_invd_sharing = true,
577};
578
579/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
580static CPUCacheInfo legacy_l1i_cache_amd = {
581 .type = INSTRUCTION_CACHE,
582 .level = 1,
583 .size = 64 * KiB,
584 .self_init = 1,
585 .line_size = 64,
586 .associativity = 2,
587 .sets = 512,
588 .partitions = 1,
589 .lines_per_tag = 1,
590 .no_invd_sharing = true,
591};
592
593/* Level 2 unified cache: */
594static CPUCacheInfo legacy_l2_cache = {
595 .type = UNIFIED_CACHE,
596 .level = 2,
597 .size = 4 * MiB,
598 .self_init = 1,
599 .line_size = 64,
600 .associativity = 16,
601 .sets = 4096,
602 .partitions = 1,
603 .no_invd_sharing = true,
604};
605
606/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
607static CPUCacheInfo legacy_l2_cache_cpuid2 = {
608 .type = UNIFIED_CACHE,
609 .level = 2,
610 .size = 2 * MiB,
611 .line_size = 64,
612 .associativity = 8,
613};
614
615
616/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
617static CPUCacheInfo legacy_l2_cache_amd = {
618 .type = UNIFIED_CACHE,
619 .level = 2,
620 .size = 512 * KiB,
621 .line_size = 64,
622 .lines_per_tag = 1,
623 .associativity = 16,
624 .sets = 512,
625 .partitions = 1,
626};
627
628/* Level 3 unified cache: */
629static CPUCacheInfo legacy_l3_cache = {
630 .type = UNIFIED_CACHE,
631 .level = 3,
632 .size = 16 * MiB,
633 .line_size = 64,
634 .associativity = 16,
635 .sets = 16384,
636 .partitions = 1,
637 .lines_per_tag = 1,
638 .self_init = true,
639 .inclusive = true,
640 .complex_indexing = true,
641};
642
643/* TLB definitions: */
644
645#define L1_DTLB_2M_ASSOC 1
646#define L1_DTLB_2M_ENTRIES 255
647#define L1_DTLB_4K_ASSOC 1
648#define L1_DTLB_4K_ENTRIES 255
649
650#define L1_ITLB_2M_ASSOC 1
651#define L1_ITLB_2M_ENTRIES 255
652#define L1_ITLB_4K_ASSOC 1
653#define L1_ITLB_4K_ENTRIES 255
654
655#define L2_DTLB_2M_ASSOC 0 /* disabled */
656#define L2_DTLB_2M_ENTRIES 0 /* disabled */
657#define L2_DTLB_4K_ASSOC 4
658#define L2_DTLB_4K_ENTRIES 512
659
660#define L2_ITLB_2M_ASSOC 0 /* disabled */
661#define L2_ITLB_2M_ENTRIES 0 /* disabled */
662#define L2_ITLB_4K_ASSOC 4
663#define L2_ITLB_4K_ENTRIES 512
664
665/* CPUID Leaf 0x14 constants: */
666#define INTEL_PT_MAX_SUBLEAF 0x1
667/*
668 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
669 * MSR can be accessed;
670 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
671 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
672 * of Intel PT MSRs across warm reset;
673 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
674 */
675#define INTEL_PT_MINIMAL_EBX 0xf
676/*
677 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
678 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
679 * accessed;
680 * bit[01]: ToPA tables can hold any number of output entries, up to the
681 * maximum allowed by the MaskOrTableOffset field of
682 * IA32_RTIT_OUTPUT_MASK_PTRS;
683 * bit[02]: Support Single-Range Output scheme;
684 */
685#define INTEL_PT_MINIMAL_ECX 0x7
686/* generated packets which contain IP payloads have LIP values */
687#define INTEL_PT_IP_LIP (1 << 31)
688#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
689#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
690#define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
691#define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
692#define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
693
694static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
695 uint32_t vendor2, uint32_t vendor3)
696{
697 int i;
698 for (i = 0; i < 4; i++) {
699 dst[i] = vendor1 >> (8 * i);
700 dst[i + 4] = vendor2 >> (8 * i);
701 dst[i + 8] = vendor3 >> (8 * i);
702 }
703 dst[CPUID_VENDOR_SZ] = '\0';
704}
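/*
 * For example, the Intel vendor string: CPUID.0 returns EBX = 0x756E6547,
 * EDX = 0x49656E69, ECX = 0x6C65746E, which this helper (called with
 * vendor1 = EBX, vendor2 = EDX, vendor3 = ECX) turns into "GenuineIntel".
 */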
705
706#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
707#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
708 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
709#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
710 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
711 CPUID_PSE36 | CPUID_FXSR)
712#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
713#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
714 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
715 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
716 CPUID_PAE | CPUID_SEP | CPUID_APIC)
717
718#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
719 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
720 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
721 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
722 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
723 /* partly implemented:
724 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
725 /* missing:
726 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
727#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
728 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
729 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
730 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
731 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
732 /* missing:
733 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
734 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
735 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
736 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
737 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
738
739#ifdef TARGET_X86_64
740#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
741#else
742#define TCG_EXT2_X86_64_FEATURES 0
743#endif
744
745#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
746 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
747 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
748 TCG_EXT2_X86_64_FEATURES)
749#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
750 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
751#define TCG_EXT4_FEATURES 0
752#define TCG_SVM_FEATURES CPUID_SVM_NPT
753#define TCG_KVM_FEATURES 0
754#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
755 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
756 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
757 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
758 CPUID_7_0_EBX_ERMS)
759 /* missing:
760 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
761 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
762 CPUID_7_0_EBX_RDSEED */
763#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
764 /* CPUID_7_0_ECX_OSPKE is dynamic */ \
765 CPUID_7_0_ECX_LA57)
766#define TCG_7_0_EDX_FEATURES 0
767#define TCG_APM_FEATURES 0
768#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
769#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
770 /* missing:
771 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
772
773typedef struct FeatureWordInfo {
774 /* feature flags names are taken from "Intel Processor Identification and
775 * the CPUID Instruction" and AMD's "CPUID Specification".
776 * In cases of disagreement between feature naming conventions,
777 * aliases may be added.
778 */
779 const char *feat_names[32];
780 uint32_t cpuid_eax; /* Input EAX for CPUID */
781 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
782 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
783 int cpuid_reg; /* output register (R_* constant) */
784 uint32_t tcg_features; /* Feature flags supported by TCG */
785 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
786 uint32_t migratable_flags; /* Feature flags known to be migratable */
787 /* Features that shouldn't be auto-enabled by "-cpu host" */
788 uint32_t no_autoenable_flags;
789} FeatureWordInfo;
790
791static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
792 [FEAT_1_EDX] = {
793 .feat_names = {
794 "fpu", "vme", "de", "pse",
795 "tsc", "msr", "pae", "mce",
796 "cx8", "apic", NULL, "sep",
797 "mtrr", "pge", "mca", "cmov",
798 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
799 NULL, "ds" /* Intel dts */, "acpi", "mmx",
800 "fxsr", "sse", "sse2", "ss",
801 "ht" /* Intel htt */, "tm", "ia64", "pbe",
802 },
803 .cpuid_eax = 1, .cpuid_reg = R_EDX,
804 .tcg_features = TCG_FEATURES,
805 },
806 [FEAT_1_ECX] = {
807 .feat_names = {
808 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
809 "ds-cpl", "vmx", "smx", "est",
810 "tm2", "ssse3", "cid", NULL,
811 "fma", "cx16", "xtpr", "pdcm",
812 NULL, "pcid", "dca", "sse4.1",
813 "sse4.2", "x2apic", "movbe", "popcnt",
814 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
815 "avx", "f16c", "rdrand", "hypervisor",
816 },
817 .cpuid_eax = 1, .cpuid_reg = R_ECX,
818 .tcg_features = TCG_EXT_FEATURES,
819 },
 820    /* Feature names that are already defined in feature_name[] but are also
 821     * set in CPUID[8000_0001].EDX on AMD CPUs don't have their names listed
 822     * in feat_names below. They are copied automatically to
 823     * features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
824 */
825 [FEAT_8000_0001_EDX] = {
826 .feat_names = {
827 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
828 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
829 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
830 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
831 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
832 "nx", NULL, "mmxext", NULL /* mmx */,
833 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
834 NULL, "lm", "3dnowext", "3dnow",
835 },
836 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
837 .tcg_features = TCG_EXT2_FEATURES,
838 },
839 [FEAT_8000_0001_ECX] = {
840 .feat_names = {
841 "lahf-lm", "cmp-legacy", "svm", "extapic",
842 "cr8legacy", "abm", "sse4a", "misalignsse",
843 "3dnowprefetch", "osvw", "ibs", "xop",
844 "skinit", "wdt", NULL, "lwp",
845 "fma4", "tce", NULL, "nodeid-msr",
846 NULL, "tbm", "topoext", "perfctr-core",
847 "perfctr-nb", NULL, NULL, NULL,
848 NULL, NULL, NULL, NULL,
849 },
850 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
851 .tcg_features = TCG_EXT3_FEATURES,
852 },
853 [FEAT_C000_0001_EDX] = {
854 .feat_names = {
855 NULL, NULL, "xstore", "xstore-en",
856 NULL, NULL, "xcrypt", "xcrypt-en",
857 "ace2", "ace2-en", "phe", "phe-en",
858 "pmm", "pmm-en", NULL, NULL,
859 NULL, NULL, NULL, NULL,
860 NULL, NULL, NULL, NULL,
861 NULL, NULL, NULL, NULL,
862 NULL, NULL, NULL, NULL,
863 },
864 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
865 .tcg_features = TCG_EXT4_FEATURES,
866 },
867 [FEAT_KVM] = {
868 .feat_names = {
869 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
870 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
871 NULL, "kvm-pv-tlb-flush", NULL, NULL,
872 NULL, NULL, NULL, NULL,
873 NULL, NULL, NULL, NULL,
874 NULL, NULL, NULL, NULL,
875 "kvmclock-stable-bit", NULL, NULL, NULL,
876 NULL, NULL, NULL, NULL,
877 },
878 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
879 .tcg_features = TCG_KVM_FEATURES,
880 },
881 [FEAT_KVM_HINTS] = {
882 .feat_names = {
883 "kvm-hint-dedicated", NULL, NULL, NULL,
884 NULL, NULL, NULL, NULL,
885 NULL, NULL, NULL, NULL,
886 NULL, NULL, NULL, NULL,
887 NULL, NULL, NULL, NULL,
888 NULL, NULL, NULL, NULL,
889 NULL, NULL, NULL, NULL,
890 NULL, NULL, NULL, NULL,
891 },
892 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
893 .tcg_features = TCG_KVM_FEATURES,
894 /*
 895         * KVM hints aren't auto-enabled by "-cpu host"; they need to be
 896         * explicitly enabled on the command line.
897 */
898 .no_autoenable_flags = ~0U,
899 },
900 [FEAT_HYPERV_EAX] = {
901 .feat_names = {
902 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
903 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
904 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
905 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
906 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
907 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
908 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
909 NULL, NULL,
910 NULL, NULL, NULL, NULL,
911 NULL, NULL, NULL, NULL,
912 NULL, NULL, NULL, NULL,
913 NULL, NULL, NULL, NULL,
914 },
915 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
916 },
917 [FEAT_HYPERV_EBX] = {
918 .feat_names = {
919 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
920 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
921 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
922 NULL /* hv_create_port */, NULL /* hv_connect_port */,
923 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
924 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
925 NULL, NULL,
926 NULL, NULL, NULL, NULL,
927 NULL, NULL, NULL, NULL,
928 NULL, NULL, NULL, NULL,
929 NULL, NULL, NULL, NULL,
930 },
931 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
932 },
933 [FEAT_HYPERV_EDX] = {
934 .feat_names = {
935 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
936 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
937 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
938 NULL, NULL,
939 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
940 NULL, NULL, NULL, NULL,
941 NULL, NULL, NULL, NULL,
942 NULL, NULL, NULL, NULL,
943 NULL, NULL, NULL, NULL,
944 NULL, NULL, NULL, NULL,
945 },
946 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
947 },
948 [FEAT_SVM] = {
949 .feat_names = {
950 "npt", "lbrv", "svm-lock", "nrip-save",
951 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
952 NULL, NULL, "pause-filter", NULL,
953 "pfthreshold", NULL, NULL, NULL,
954 NULL, NULL, NULL, NULL,
955 NULL, NULL, NULL, NULL,
956 NULL, NULL, NULL, NULL,
957 NULL, NULL, NULL, NULL,
958 },
959 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
960 .tcg_features = TCG_SVM_FEATURES,
961 },
962 [FEAT_7_0_EBX] = {
963 .feat_names = {
964 "fsgsbase", "tsc-adjust", NULL, "bmi1",
965 "hle", "avx2", NULL, "smep",
966 "bmi2", "erms", "invpcid", "rtm",
967 NULL, NULL, "mpx", NULL,
968 "avx512f", "avx512dq", "rdseed", "adx",
969 "smap", "avx512ifma", "pcommit", "clflushopt",
970 "clwb", "intel-pt", "avx512pf", "avx512er",
971 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
972 },
973 .cpuid_eax = 7,
974 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
975 .cpuid_reg = R_EBX,
976 .tcg_features = TCG_7_0_EBX_FEATURES,
977 },
978 [FEAT_7_0_ECX] = {
979 .feat_names = {
980 NULL, "avx512vbmi", "umip", "pku",
981 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
982 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
983 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
984 "la57", NULL, NULL, NULL,
985 NULL, NULL, "rdpid", NULL,
986 NULL, "cldemote", NULL, NULL,
987 NULL, NULL, NULL, NULL,
988 },
989 .cpuid_eax = 7,
990 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
991 .cpuid_reg = R_ECX,
992 .tcg_features = TCG_7_0_ECX_FEATURES,
993 },
994 [FEAT_7_0_EDX] = {
995 .feat_names = {
996 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
997 NULL, NULL, NULL, NULL,
998 NULL, NULL, NULL, NULL,
999 NULL, NULL, NULL, NULL,
1000 NULL, NULL, "pconfig", NULL,
1001 NULL, NULL, NULL, NULL,
1002 NULL, NULL, "spec-ctrl", NULL,
1003 NULL, "arch-capabilities", NULL, "ssbd",
1004 },
1005 .cpuid_eax = 7,
1006 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1007 .cpuid_reg = R_EDX,
1008 .tcg_features = TCG_7_0_EDX_FEATURES,
1009 .unmigratable_flags = CPUID_7_0_EDX_ARCH_CAPABILITIES,
1010 },
1011 [FEAT_8000_0007_EDX] = {
1012 .feat_names = {
1013 NULL, NULL, NULL, NULL,
1014 NULL, NULL, NULL, NULL,
1015 "invtsc", NULL, NULL, NULL,
1016 NULL, NULL, NULL, NULL,
1017 NULL, NULL, NULL, NULL,
1018 NULL, NULL, NULL, NULL,
1019 NULL, NULL, NULL, NULL,
1020 NULL, NULL, NULL, NULL,
1021 },
1022 .cpuid_eax = 0x80000007,
1023 .cpuid_reg = R_EDX,
1024 .tcg_features = TCG_APM_FEATURES,
1025 .unmigratable_flags = CPUID_APM_INVTSC,
1026 },
1027 [FEAT_8000_0008_EBX] = {
1028 .feat_names = {
1029 NULL, NULL, NULL, NULL,
1030 NULL, NULL, NULL, NULL,
1031 NULL, "wbnoinvd", NULL, NULL,
1032 "ibpb", NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL,
1034 NULL, NULL, NULL, NULL,
1035 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1036 NULL, NULL, NULL, NULL,
1037 },
1038 .cpuid_eax = 0x80000008,
1039 .cpuid_reg = R_EBX,
1040 .tcg_features = 0,
1041 .unmigratable_flags = 0,
1042 },
1043 [FEAT_XSAVE] = {
1044 .feat_names = {
1045 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1046 NULL, NULL, NULL, NULL,
1047 NULL, NULL, NULL, NULL,
1048 NULL, NULL, NULL, NULL,
1049 NULL, NULL, NULL, NULL,
1050 NULL, NULL, NULL, NULL,
1051 NULL, NULL, NULL, NULL,
1052 NULL, NULL, NULL, NULL,
1053 },
1054 .cpuid_eax = 0xd,
1055 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
1056 .cpuid_reg = R_EAX,
1057 .tcg_features = TCG_XSAVE_FEATURES,
1058 },
1059 [FEAT_6_EAX] = {
1060 .feat_names = {
1061 NULL, NULL, "arat", NULL,
1062 NULL, NULL, NULL, NULL,
1063 NULL, NULL, NULL, NULL,
1064 NULL, NULL, NULL, NULL,
1065 NULL, NULL, NULL, NULL,
1066 NULL, NULL, NULL, NULL,
1067 NULL, NULL, NULL, NULL,
1068 NULL, NULL, NULL, NULL,
1069 },
1070 .cpuid_eax = 6, .cpuid_reg = R_EAX,
1071 .tcg_features = TCG_6_EAX_FEATURES,
1072 },
1073 [FEAT_XSAVE_COMP_LO] = {
1074 .cpuid_eax = 0xD,
1075 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1076 .cpuid_reg = R_EAX,
1077 .tcg_features = ~0U,
1078 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1079 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1080 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1081 XSTATE_PKRU_MASK,
1082 },
1083 [FEAT_XSAVE_COMP_HI] = {
1084 .cpuid_eax = 0xD,
1085 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1086 .cpuid_reg = R_EDX,
1087 .tcg_features = ~0U,
1088 },
1089};
1090
1091typedef struct X86RegisterInfo32 {
1092 /* Name of register */
1093 const char *name;
 1094    /* QAPI enum value for this register */
1095 X86CPURegister32 qapi_enum;
1096} X86RegisterInfo32;
1097
1098#define REGISTER(reg) \
1099 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1100static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
1101 REGISTER(EAX),
1102 REGISTER(ECX),
1103 REGISTER(EDX),
1104 REGISTER(EBX),
1105 REGISTER(ESP),
1106 REGISTER(EBP),
1107 REGISTER(ESI),
1108 REGISTER(EDI),
1109};
1110#undef REGISTER
1111
1112typedef struct ExtSaveArea {
1113 uint32_t feature, bits;
1114 uint32_t offset, size;
1115} ExtSaveArea;
1116
1117static const ExtSaveArea x86_ext_save_areas[] = {
1118 [XSTATE_FP_BIT] = {
1119 /* x87 FP state component is always enabled if XSAVE is supported */
1120 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1121 /* x87 state is in the legacy region of the XSAVE area */
1122 .offset = 0,
1123 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1124 },
1125 [XSTATE_SSE_BIT] = {
1126 /* SSE state component is always enabled if XSAVE is supported */
1127 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1128 /* SSE state is in the legacy region of the XSAVE area */
1129 .offset = 0,
1130 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1131 },
1132 [XSTATE_YMM_BIT] =
1133 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1134 .offset = offsetof(X86XSaveArea, avx_state),
1135 .size = sizeof(XSaveAVX) },
1136 [XSTATE_BNDREGS_BIT] =
1137 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1138 .offset = offsetof(X86XSaveArea, bndreg_state),
1139 .size = sizeof(XSaveBNDREG) },
1140 [XSTATE_BNDCSR_BIT] =
1141 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1142 .offset = offsetof(X86XSaveArea, bndcsr_state),
1143 .size = sizeof(XSaveBNDCSR) },
1144 [XSTATE_OPMASK_BIT] =
1145 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1146 .offset = offsetof(X86XSaveArea, opmask_state),
1147 .size = sizeof(XSaveOpmask) },
1148 [XSTATE_ZMM_Hi256_BIT] =
1149 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1150 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1151 .size = sizeof(XSaveZMM_Hi256) },
1152 [XSTATE_Hi16_ZMM_BIT] =
1153 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1154 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1155 .size = sizeof(XSaveHi16_ZMM) },
1156 [XSTATE_PKRU_BIT] =
1157 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1158 .offset = offsetof(X86XSaveArea, pkru_state),
1159 .size = sizeof(XSavePKRU) },
1160};
1161
1162static uint32_t xsave_area_size(uint64_t mask)
1163{
1164 int i;
1165 uint64_t ret = 0;
1166
1167 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1168 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1169 if ((mask >> i) & 1) {
1170 ret = MAX(ret, esa->offset + esa->size);
1171 }
1172 }
1173 return ret;
1174}
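/*
 * Illustrative example: a mask containing only XSTATE_FP_MASK and
 * XSTATE_SSE_MASK covers just the legacy region plus the XSAVE header,
 * i.e. sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader)
 * (512 + 64 = 576 bytes with the usual layout).
 */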
1175
1176static inline bool accel_uses_host_cpuid(void)
1177{
1178 return kvm_enabled() || hvf_enabled();
1179}
1180
1181static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1182{
1183 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1184 cpu->env.features[FEAT_XSAVE_COMP_LO];
1185}
1186
1187const char *get_register_name_32(unsigned int reg)
1188{
1189 if (reg >= CPU_NB_REGS32) {
1190 return NULL;
1191 }
1192 return x86_reg_info_32[reg].name;
1193}
1194
1195/*
1196 * Returns the set of feature flags that are supported and migratable by
1197 * QEMU, for a given FeatureWord.
1198 */
1199static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1200{
1201 FeatureWordInfo *wi = &feature_word_info[w];
1202 uint32_t r = 0;
1203 int i;
1204
1205 for (i = 0; i < 32; i++) {
1206 uint32_t f = 1U << i;
1207
1208 /* If the feature name is known, it is implicitly considered migratable,
1209 * unless it is explicitly set in unmigratable_flags */
1210 if ((wi->migratable_flags & f) ||
1211 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1212 r |= f;
1213 }
1214 }
1215 return r;
1216}
1217
1218void host_cpuid(uint32_t function, uint32_t count,
1219 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1220{
1221 uint32_t vec[4];
1222
1223#ifdef __x86_64__
1224 asm volatile("cpuid"
1225 : "=a"(vec[0]), "=b"(vec[1]),
1226 "=c"(vec[2]), "=d"(vec[3])
1227 : "0"(function), "c"(count) : "cc");
1228#elif defined(__i386__)
1229 asm volatile("pusha \n\t"
1230 "cpuid \n\t"
1231 "mov %%eax, 0(%2) \n\t"
1232 "mov %%ebx, 4(%2) \n\t"
1233 "mov %%ecx, 8(%2) \n\t"
1234 "mov %%edx, 12(%2) \n\t"
1235 "popa"
1236 : : "a"(function), "c"(count), "S"(vec)
1237 : "memory", "cc");
1238#else
1239 abort();
1240#endif
1241
1242 if (eax)
1243 *eax = vec[0];
1244 if (ebx)
1245 *ebx = vec[1];
1246 if (ecx)
1247 *ecx = vec[2];
1248 if (edx)
1249 *edx = vec[3];
1250}
1251
1252void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1253{
1254 uint32_t eax, ebx, ecx, edx;
1255
1256 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1257 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1258
1259 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1260 if (family) {
1261 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1262 }
1263 if (model) {
1264 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1265 }
1266 if (stepping) {
1267 *stepping = eax & 0x0F;
1268 }
1269}
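/*
 * Illustrative example: CPUID.1 EAX = 0x000506E3 (a Skylake-class part)
 * decodes to family = 0x6 + 0x0 = 6, model = 0xE | 0x50 = 94 and
 * stepping = 3.
 */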
1270
1271/* CPU class name definitions: */
1272
1273/* Return type name for a given CPU model name
1274 * Caller is responsible for freeing the returned string.
1275 */
1276static char *x86_cpu_type_name(const char *model_name)
1277{
1278 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
1279}
1280
1281static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1282{
1283 ObjectClass *oc;
1284 char *typename = x86_cpu_type_name(cpu_model);
1285 oc = object_class_by_name(typename);
1286 g_free(typename);
1287 return oc;
1288}
1289
1290static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1291{
1292 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1293 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1294 return g_strndup(class_name,
1295 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1296}
1297
1298struct X86CPUDefinition {
1299 const char *name;
1300 uint32_t level;
1301 uint32_t xlevel;
1302 /* vendor is zero-terminated, 12 character ASCII string */
1303 char vendor[CPUID_VENDOR_SZ + 1];
1304 int family;
1305 int model;
1306 int stepping;
1307 FeatureWordArray features;
1308 const char *model_id;
1309 CPUCaches *cache_info;
1310};
1311
1312static CPUCaches epyc_cache_info = {
1313 .l1d_cache = &(CPUCacheInfo) {
1314 .type = DATA_CACHE,
1315 .level = 1,
1316 .size = 32 * KiB,
1317 .line_size = 64,
1318 .associativity = 8,
1319 .partitions = 1,
1320 .sets = 64,
1321 .lines_per_tag = 1,
1322 .self_init = 1,
1323 .no_invd_sharing = true,
1324 },
1325 .l1i_cache = &(CPUCacheInfo) {
1326 .type = INSTRUCTION_CACHE,
1327 .level = 1,
1328 .size = 64 * KiB,
1329 .line_size = 64,
1330 .associativity = 4,
1331 .partitions = 1,
1332 .sets = 256,
1333 .lines_per_tag = 1,
1334 .self_init = 1,
1335 .no_invd_sharing = true,
1336 },
1337 .l2_cache = &(CPUCacheInfo) {
1338 .type = UNIFIED_CACHE,
1339 .level = 2,
1340 .size = 512 * KiB,
1341 .line_size = 64,
1342 .associativity = 8,
1343 .partitions = 1,
1344 .sets = 1024,
1345 .lines_per_tag = 1,
1346 },
1347 .l3_cache = &(CPUCacheInfo) {
1348 .type = UNIFIED_CACHE,
1349 .level = 3,
1350 .size = 8 * MiB,
1351 .line_size = 64,
1352 .associativity = 16,
1353 .partitions = 1,
1354 .sets = 8192,
1355 .lines_per_tag = 1,
1356 .self_init = true,
1357 .inclusive = true,
1358 .complex_indexing = true,
1359 },
1360};
1361
1362static X86CPUDefinition builtin_x86_defs[] = {
1363 {
1364 .name = "qemu64",
1365 .level = 0xd,
1366 .vendor = CPUID_VENDOR_AMD,
1367 .family = 6,
1368 .model = 6,
1369 .stepping = 3,
1370 .features[FEAT_1_EDX] =
1371 PPRO_FEATURES |
1372 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1373 CPUID_PSE36,
1374 .features[FEAT_1_ECX] =
1375 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1376 .features[FEAT_8000_0001_EDX] =
1377 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1378 .features[FEAT_8000_0001_ECX] =
1379 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1380 .xlevel = 0x8000000A,
1381 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1382 },
1383 {
1384 .name = "phenom",
1385 .level = 5,
1386 .vendor = CPUID_VENDOR_AMD,
1387 .family = 16,
1388 .model = 2,
1389 .stepping = 3,
1390 /* Missing: CPUID_HT */
1391 .features[FEAT_1_EDX] =
1392 PPRO_FEATURES |
1393 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1394 CPUID_PSE36 | CPUID_VME,
1395 .features[FEAT_1_ECX] =
1396 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1397 CPUID_EXT_POPCNT,
1398 .features[FEAT_8000_0001_EDX] =
1399 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1400 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1401 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1402 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1403 CPUID_EXT3_CR8LEG,
1404 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1405 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1406 .features[FEAT_8000_0001_ECX] =
1407 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1408 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1409 /* Missing: CPUID_SVM_LBRV */
1410 .features[FEAT_SVM] =
1411 CPUID_SVM_NPT,
1412 .xlevel = 0x8000001A,
1413 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1414 },
1415 {
1416 .name = "core2duo",
1417 .level = 10,
1418 .vendor = CPUID_VENDOR_INTEL,
1419 .family = 6,
1420 .model = 15,
1421 .stepping = 11,
1422 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1423 .features[FEAT_1_EDX] =
1424 PPRO_FEATURES |
1425 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1426 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1427 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1428 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1429 .features[FEAT_1_ECX] =
1430 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1431 CPUID_EXT_CX16,
1432 .features[FEAT_8000_0001_EDX] =
1433 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1434 .features[FEAT_8000_0001_ECX] =
1435 CPUID_EXT3_LAHF_LM,
1436 .xlevel = 0x80000008,
1437 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1438 },
1439 {
1440 .name = "kvm64",
1441 .level = 0xd,
1442 .vendor = CPUID_VENDOR_INTEL,
1443 .family = 15,
1444 .model = 6,
1445 .stepping = 1,
1446 /* Missing: CPUID_HT */
1447 .features[FEAT_1_EDX] =
1448 PPRO_FEATURES | CPUID_VME |
1449 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1450 CPUID_PSE36,
1451 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1452 .features[FEAT_1_ECX] =
1453 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1454 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1455 .features[FEAT_8000_0001_EDX] =
1456 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1457 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1458 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1459 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1460 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1461 .features[FEAT_8000_0001_ECX] =
1462 0,
1463 .xlevel = 0x80000008,
1464 .model_id = "Common KVM processor"
1465 },
1466 {
1467 .name = "qemu32",
1468 .level = 4,
1469 .vendor = CPUID_VENDOR_INTEL,
1470 .family = 6,
1471 .model = 6,
1472 .stepping = 3,
1473 .features[FEAT_1_EDX] =
1474 PPRO_FEATURES,
1475 .features[FEAT_1_ECX] =
1476 CPUID_EXT_SSE3,
1477 .xlevel = 0x80000004,
1478 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1479 },
1480 {
1481 .name = "kvm32",
1482 .level = 5,
1483 .vendor = CPUID_VENDOR_INTEL,
1484 .family = 15,
1485 .model = 6,
1486 .stepping = 1,
1487 .features[FEAT_1_EDX] =
1488 PPRO_FEATURES | CPUID_VME |
1489 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1490 .features[FEAT_1_ECX] =
1491 CPUID_EXT_SSE3,
1492 .features[FEAT_8000_0001_ECX] =
1493 0,
1494 .xlevel = 0x80000008,
1495 .model_id = "Common 32-bit KVM processor"
1496 },
1497 {
1498 .name = "coreduo",
1499 .level = 10,
1500 .vendor = CPUID_VENDOR_INTEL,
1501 .family = 6,
1502 .model = 14,
1503 .stepping = 8,
1504 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1505 .features[FEAT_1_EDX] =
1506 PPRO_FEATURES | CPUID_VME |
1507 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1508 CPUID_SS,
1509 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1510 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1511 .features[FEAT_1_ECX] =
1512 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1513 .features[FEAT_8000_0001_EDX] =
1514 CPUID_EXT2_NX,
1515 .xlevel = 0x80000008,
1516 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1517 },
1518 {
1519 .name = "486",
1520 .level = 1,
1521 .vendor = CPUID_VENDOR_INTEL,
1522 .family = 4,
1523 .model = 8,
1524 .stepping = 0,
1525 .features[FEAT_1_EDX] =
1526 I486_FEATURES,
1527 .xlevel = 0,
1528 .model_id = "",
1529 },
1530 {
1531 .name = "pentium",
1532 .level = 1,
1533 .vendor = CPUID_VENDOR_INTEL,
1534 .family = 5,
1535 .model = 4,
1536 .stepping = 3,
1537 .features[FEAT_1_EDX] =
1538 PENTIUM_FEATURES,
1539 .xlevel = 0,
1540 .model_id = "",
1541 },
1542 {
1543 .name = "pentium2",
1544 .level = 2,
1545 .vendor = CPUID_VENDOR_INTEL,
1546 .family = 6,
1547 .model = 5,
1548 .stepping = 2,
1549 .features[FEAT_1_EDX] =
1550 PENTIUM2_FEATURES,
1551 .xlevel = 0,
1552 .model_id = "",
1553 },
1554 {
1555 .name = "pentium3",
1556 .level = 3,
1557 .vendor = CPUID_VENDOR_INTEL,
1558 .family = 6,
1559 .model = 7,
1560 .stepping = 3,
1561 .features[FEAT_1_EDX] =
1562 PENTIUM3_FEATURES,
1563 .xlevel = 0,
1564 .model_id = "",
1565 },
1566 {
1567 .name = "athlon",
1568 .level = 2,
1569 .vendor = CPUID_VENDOR_AMD,
1570 .family = 6,
1571 .model = 2,
1572 .stepping = 3,
1573 .features[FEAT_1_EDX] =
1574 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1575 CPUID_MCA,
1576 .features[FEAT_8000_0001_EDX] =
1577 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1578 .xlevel = 0x80000008,
1579 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1580 },
1581 {
1582 .name = "n270",
1583 .level = 10,
1584 .vendor = CPUID_VENDOR_INTEL,
1585 .family = 6,
1586 .model = 28,
1587 .stepping = 2,
1588 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1589 .features[FEAT_1_EDX] =
1590 PPRO_FEATURES |
1591 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1592 CPUID_ACPI | CPUID_SS,
 1593        /* Some CPUs have no CPUID_SEP */
1594 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1595 * CPUID_EXT_XTPR */
1596 .features[FEAT_1_ECX] =
1597 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1598 CPUID_EXT_MOVBE,
1599 .features[FEAT_8000_0001_EDX] =
1600 CPUID_EXT2_NX,
1601 .features[FEAT_8000_0001_ECX] =
1602 CPUID_EXT3_LAHF_LM,
1603 .xlevel = 0x80000008,
1604 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1605 },
1606 {
1607 .name = "Conroe",
1608 .level = 10,
1609 .vendor = CPUID_VENDOR_INTEL,
1610 .family = 6,
1611 .model = 15,
1612 .stepping = 3,
1613 .features[FEAT_1_EDX] =
1614 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1615 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1616 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1617 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1618 CPUID_DE | CPUID_FP87,
1619 .features[FEAT_1_ECX] =
1620 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1621 .features[FEAT_8000_0001_EDX] =
1622 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1623 .features[FEAT_8000_0001_ECX] =
1624 CPUID_EXT3_LAHF_LM,
1625 .xlevel = 0x80000008,
1626 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1627 },
1628 {
1629 .name = "Penryn",
1630 .level = 10,
1631 .vendor = CPUID_VENDOR_INTEL,
1632 .family = 6,
1633 .model = 23,
1634 .stepping = 3,
1635 .features[FEAT_1_EDX] =
1636 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1637 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1638 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1639 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1640 CPUID_DE | CPUID_FP87,
1641 .features[FEAT_1_ECX] =
1642 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1643 CPUID_EXT_SSE3,
1644 .features[FEAT_8000_0001_EDX] =
1645 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1646 .features[FEAT_8000_0001_ECX] =
1647 CPUID_EXT3_LAHF_LM,
1648 .xlevel = 0x80000008,
1649 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1650 },
1651 {
1652 .name = "Nehalem",
1653 .level = 11,
1654 .vendor = CPUID_VENDOR_INTEL,
1655 .family = 6,
1656 .model = 26,
1657 .stepping = 3,
1658 .features[FEAT_1_EDX] =
1659 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1660 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1661 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1662 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1663 CPUID_DE | CPUID_FP87,
1664 .features[FEAT_1_ECX] =
1665 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1666 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1667 .features[FEAT_8000_0001_EDX] =
1668 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1669 .features[FEAT_8000_0001_ECX] =
1670 CPUID_EXT3_LAHF_LM,
1671 .xlevel = 0x80000008,
1672 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1673 },
1674 {
1675 .name = "Nehalem-IBRS",
1676 .level = 11,
1677 .vendor = CPUID_VENDOR_INTEL,
1678 .family = 6,
1679 .model = 26,
1680 .stepping = 3,
1681 .features[FEAT_1_EDX] =
1682 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1683 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1684 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1685 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1686 CPUID_DE | CPUID_FP87,
1687 .features[FEAT_1_ECX] =
1688 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1689 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1690 .features[FEAT_7_0_EDX] =
1691 CPUID_7_0_EDX_SPEC_CTRL,
1692 .features[FEAT_8000_0001_EDX] =
1693 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1694 .features[FEAT_8000_0001_ECX] =
1695 CPUID_EXT3_LAHF_LM,
1696 .xlevel = 0x80000008,
1697 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1698 },
1699 {
1700 .name = "Westmere",
1701 .level = 11,
1702 .vendor = CPUID_VENDOR_INTEL,
1703 .family = 6,
1704 .model = 44,
1705 .stepping = 1,
1706 .features[FEAT_1_EDX] =
1707 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1708 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1709 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1710 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1711 CPUID_DE | CPUID_FP87,
1712 .features[FEAT_1_ECX] =
1713 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1714 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1715 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1716 .features[FEAT_8000_0001_EDX] =
1717 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1718 .features[FEAT_8000_0001_ECX] =
1719 CPUID_EXT3_LAHF_LM,
1720 .features[FEAT_6_EAX] =
1721 CPUID_6_EAX_ARAT,
1722 .xlevel = 0x80000008,
1723 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1724 },
1725 {
1726 .name = "Westmere-IBRS",
1727 .level = 11,
1728 .vendor = CPUID_VENDOR_INTEL,
1729 .family = 6,
1730 .model = 44,
1731 .stepping = 1,
1732 .features[FEAT_1_EDX] =
1733 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1734 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1735 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1736 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1737 CPUID_DE | CPUID_FP87,
1738 .features[FEAT_1_ECX] =
1739 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1740 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1741 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1742 .features[FEAT_8000_0001_EDX] =
1743 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1744 .features[FEAT_8000_0001_ECX] =
1745 CPUID_EXT3_LAHF_LM,
1746 .features[FEAT_7_0_EDX] =
1747 CPUID_7_0_EDX_SPEC_CTRL,
1748 .features[FEAT_6_EAX] =
1749 CPUID_6_EAX_ARAT,
1750 .xlevel = 0x80000008,
1751 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1752 },
1753 {
1754 .name = "SandyBridge",
1755 .level = 0xd,
1756 .vendor = CPUID_VENDOR_INTEL,
1757 .family = 6,
1758 .model = 42,
1759 .stepping = 1,
1760 .features[FEAT_1_EDX] =
1761 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1762 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1763 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1764 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1765 CPUID_DE | CPUID_FP87,
1766 .features[FEAT_1_ECX] =
1767 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1768 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1769 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1770 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1771 CPUID_EXT_SSE3,
1772 .features[FEAT_8000_0001_EDX] =
1773 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1774 CPUID_EXT2_SYSCALL,
1775 .features[FEAT_8000_0001_ECX] =
1776 CPUID_EXT3_LAHF_LM,
1777 .features[FEAT_XSAVE] =
1778 CPUID_XSAVE_XSAVEOPT,
1779 .features[FEAT_6_EAX] =
1780 CPUID_6_EAX_ARAT,
1781 .xlevel = 0x80000008,
1782 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1783 },
1784 {
1785 .name = "SandyBridge-IBRS",
1786 .level = 0xd,
1787 .vendor = CPUID_VENDOR_INTEL,
1788 .family = 6,
1789 .model = 42,
1790 .stepping = 1,
1791 .features[FEAT_1_EDX] =
1792 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1793 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1794 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1795 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1796 CPUID_DE | CPUID_FP87,
1797 .features[FEAT_1_ECX] =
1798 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1799 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1800 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1801 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1802 CPUID_EXT_SSE3,
1803 .features[FEAT_8000_0001_EDX] =
1804 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1805 CPUID_EXT2_SYSCALL,
1806 .features[FEAT_8000_0001_ECX] =
1807 CPUID_EXT3_LAHF_LM,
1808 .features[FEAT_7_0_EDX] =
1809 CPUID_7_0_EDX_SPEC_CTRL,
1810 .features[FEAT_XSAVE] =
1811 CPUID_XSAVE_XSAVEOPT,
1812 .features[FEAT_6_EAX] =
1813 CPUID_6_EAX_ARAT,
1814 .xlevel = 0x80000008,
1815 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1816 },
1817 {
1818 .name = "IvyBridge",
1819 .level = 0xd,
1820 .vendor = CPUID_VENDOR_INTEL,
1821 .family = 6,
1822 .model = 58,
1823 .stepping = 9,
1824 .features[FEAT_1_EDX] =
1825 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1826 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1827 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1828 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1829 CPUID_DE | CPUID_FP87,
1830 .features[FEAT_1_ECX] =
1831 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1832 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1833 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1834 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1835 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1836 .features[FEAT_7_0_EBX] =
1837 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1838 CPUID_7_0_EBX_ERMS,
1839 .features[FEAT_8000_0001_EDX] =
1840 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1841 CPUID_EXT2_SYSCALL,
1842 .features[FEAT_8000_0001_ECX] =
1843 CPUID_EXT3_LAHF_LM,
1844 .features[FEAT_XSAVE] =
1845 CPUID_XSAVE_XSAVEOPT,
1846 .features[FEAT_6_EAX] =
1847 CPUID_6_EAX_ARAT,
1848 .xlevel = 0x80000008,
1849 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1850 },
1851 {
1852 .name = "IvyBridge-IBRS",
1853 .level = 0xd,
1854 .vendor = CPUID_VENDOR_INTEL,
1855 .family = 6,
1856 .model = 58,
1857 .stepping = 9,
1858 .features[FEAT_1_EDX] =
1859 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1860 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1861 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1862 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1863 CPUID_DE | CPUID_FP87,
1864 .features[FEAT_1_ECX] =
1865 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1866 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1867 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1868 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1869 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1870 .features[FEAT_7_0_EBX] =
1871 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1872 CPUID_7_0_EBX_ERMS,
1873 .features[FEAT_8000_0001_EDX] =
1874 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1875 CPUID_EXT2_SYSCALL,
1876 .features[FEAT_8000_0001_ECX] =
1877 CPUID_EXT3_LAHF_LM,
1878 .features[FEAT_7_0_EDX] =
1879 CPUID_7_0_EDX_SPEC_CTRL,
1880 .features[FEAT_XSAVE] =
1881 CPUID_XSAVE_XSAVEOPT,
1882 .features[FEAT_6_EAX] =
1883 CPUID_6_EAX_ARAT,
1884 .xlevel = 0x80000008,
1885 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1886 },
1887 {
1888 .name = "Haswell-noTSX",
1889 .level = 0xd,
1890 .vendor = CPUID_VENDOR_INTEL,
1891 .family = 6,
1892 .model = 60,
1893 .stepping = 1,
1894 .features[FEAT_1_EDX] =
1895 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1896 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1897 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1898 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1899 CPUID_DE | CPUID_FP87,
1900 .features[FEAT_1_ECX] =
1901 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1902 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1903 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1904 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1905 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1906 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1907 .features[FEAT_8000_0001_EDX] =
1908 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1909 CPUID_EXT2_SYSCALL,
1910 .features[FEAT_8000_0001_ECX] =
1911 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1912 .features[FEAT_7_0_EBX] =
1913 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1914 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1915 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1916 .features[FEAT_XSAVE] =
1917 CPUID_XSAVE_XSAVEOPT,
1918 .features[FEAT_6_EAX] =
1919 CPUID_6_EAX_ARAT,
1920 .xlevel = 0x80000008,
1921 .model_id = "Intel Core Processor (Haswell, no TSX)",
1922 },
1923 {
1924 .name = "Haswell-noTSX-IBRS",
1925 .level = 0xd,
1926 .vendor = CPUID_VENDOR_INTEL,
1927 .family = 6,
1928 .model = 60,
1929 .stepping = 1,
1930 .features[FEAT_1_EDX] =
1931 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1932 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1933 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1934 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1935 CPUID_DE | CPUID_FP87,
1936 .features[FEAT_1_ECX] =
1937 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1938 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1939 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1940 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1941 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1942 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1943 .features[FEAT_8000_0001_EDX] =
1944 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1945 CPUID_EXT2_SYSCALL,
1946 .features[FEAT_8000_0001_ECX] =
1947 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1948 .features[FEAT_7_0_EDX] =
1949 CPUID_7_0_EDX_SPEC_CTRL,
1950 .features[FEAT_7_0_EBX] =
1951 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1952 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1953 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1954 .features[FEAT_XSAVE] =
1955 CPUID_XSAVE_XSAVEOPT,
1956 .features[FEAT_6_EAX] =
1957 CPUID_6_EAX_ARAT,
1958 .xlevel = 0x80000008,
1959 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1960 },
1961 {
1962 .name = "Haswell",
1963 .level = 0xd,
1964 .vendor = CPUID_VENDOR_INTEL,
1965 .family = 6,
1966 .model = 60,
1967 .stepping = 4,
1968 .features[FEAT_1_EDX] =
1969 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1970 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1971 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1972 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1973 CPUID_DE | CPUID_FP87,
1974 .features[FEAT_1_ECX] =
1975 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1976 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1977 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1978 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1979 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1980 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1981 .features[FEAT_8000_0001_EDX] =
1982 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1983 CPUID_EXT2_SYSCALL,
1984 .features[FEAT_8000_0001_ECX] =
1985 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1986 .features[FEAT_7_0_EBX] =
1987 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1988 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1989 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1990 CPUID_7_0_EBX_RTM,
1991 .features[FEAT_XSAVE] =
1992 CPUID_XSAVE_XSAVEOPT,
1993 .features[FEAT_6_EAX] =
1994 CPUID_6_EAX_ARAT,
1995 .xlevel = 0x80000008,
1996 .model_id = "Intel Core Processor (Haswell)",
1997 },
1998 {
1999 .name = "Haswell-IBRS",
2000 .level = 0xd,
2001 .vendor = CPUID_VENDOR_INTEL,
2002 .family = 6,
2003 .model = 60,
2004 .stepping = 4,
2005 .features[FEAT_1_EDX] =
2006 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2007 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2008 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2009 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2010 CPUID_DE | CPUID_FP87,
2011 .features[FEAT_1_ECX] =
2012 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2013 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2014 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2015 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2016 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2017 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2018 .features[FEAT_8000_0001_EDX] =
2019 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2020 CPUID_EXT2_SYSCALL,
2021 .features[FEAT_8000_0001_ECX] =
2022 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2023 .features[FEAT_7_0_EDX] =
2024 CPUID_7_0_EDX_SPEC_CTRL,
2025 .features[FEAT_7_0_EBX] =
2026 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2027 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2028 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2029 CPUID_7_0_EBX_RTM,
2030 .features[FEAT_XSAVE] =
2031 CPUID_XSAVE_XSAVEOPT,
2032 .features[FEAT_6_EAX] =
2033 CPUID_6_EAX_ARAT,
2034 .xlevel = 0x80000008,
2035 .model_id = "Intel Core Processor (Haswell, IBRS)",
2036 },
2037 {
2038 .name = "Broadwell-noTSX",
2039 .level = 0xd,
2040 .vendor = CPUID_VENDOR_INTEL,
2041 .family = 6,
2042 .model = 61,
2043 .stepping = 2,
2044 .features[FEAT_1_EDX] =
2045 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2046 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2047 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2048 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2049 CPUID_DE | CPUID_FP87,
2050 .features[FEAT_1_ECX] =
2051 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2052 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2053 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2054 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2055 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2056 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2057 .features[FEAT_8000_0001_EDX] =
2058 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2059 CPUID_EXT2_SYSCALL,
2060 .features[FEAT_8000_0001_ECX] =
2061 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2062 .features[FEAT_7_0_EBX] =
2063 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2064 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2065 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2066 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2067 CPUID_7_0_EBX_SMAP,
2068 .features[FEAT_XSAVE] =
2069 CPUID_XSAVE_XSAVEOPT,
2070 .features[FEAT_6_EAX] =
2071 CPUID_6_EAX_ARAT,
2072 .xlevel = 0x80000008,
2073 .model_id = "Intel Core Processor (Broadwell, no TSX)",
2074 },
2075 {
2076 .name = "Broadwell-noTSX-IBRS",
2077 .level = 0xd,
2078 .vendor = CPUID_VENDOR_INTEL,
2079 .family = 6,
2080 .model = 61,
2081 .stepping = 2,
2082 .features[FEAT_1_EDX] =
2083 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2084 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2085 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2086 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2087 CPUID_DE | CPUID_FP87,
2088 .features[FEAT_1_ECX] =
2089 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2090 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2091 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2092 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2093 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2094 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2095 .features[FEAT_8000_0001_EDX] =
2096 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2097 CPUID_EXT2_SYSCALL,
2098 .features[FEAT_8000_0001_ECX] =
2099 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2100 .features[FEAT_7_0_EDX] =
2101 CPUID_7_0_EDX_SPEC_CTRL,
2102 .features[FEAT_7_0_EBX] =
2103 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2104 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2105 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2106 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2107 CPUID_7_0_EBX_SMAP,
2108 .features[FEAT_XSAVE] =
2109 CPUID_XSAVE_XSAVEOPT,
2110 .features[FEAT_6_EAX] =
2111 CPUID_6_EAX_ARAT,
2112 .xlevel = 0x80000008,
2113 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
2114 },
2115 {
2116 .name = "Broadwell",
2117 .level = 0xd,
2118 .vendor = CPUID_VENDOR_INTEL,
2119 .family = 6,
2120 .model = 61,
2121 .stepping = 2,
2122 .features[FEAT_1_EDX] =
2123 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2124 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2125 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2126 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2127 CPUID_DE | CPUID_FP87,
2128 .features[FEAT_1_ECX] =
2129 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2130 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2131 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2132 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2133 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2134 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2135 .features[FEAT_8000_0001_EDX] =
2136 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2137 CPUID_EXT2_SYSCALL,
2138 .features[FEAT_8000_0001_ECX] =
2139 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2140 .features[FEAT_7_0_EBX] =
2141 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2142 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2143 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2144 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2145 CPUID_7_0_EBX_SMAP,
2146 .features[FEAT_XSAVE] =
2147 CPUID_XSAVE_XSAVEOPT,
2148 .features[FEAT_6_EAX] =
2149 CPUID_6_EAX_ARAT,
2150 .xlevel = 0x80000008,
2151 .model_id = "Intel Core Processor (Broadwell)",
2152 },
2153 {
2154 .name = "Broadwell-IBRS",
2155 .level = 0xd,
2156 .vendor = CPUID_VENDOR_INTEL,
2157 .family = 6,
2158 .model = 61,
2159 .stepping = 2,
2160 .features[FEAT_1_EDX] =
2161 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2162 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2163 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2164 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2165 CPUID_DE | CPUID_FP87,
2166 .features[FEAT_1_ECX] =
2167 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2168 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2169 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2170 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2171 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2172 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2173 .features[FEAT_8000_0001_EDX] =
2174 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2175 CPUID_EXT2_SYSCALL,
2176 .features[FEAT_8000_0001_ECX] =
2177 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2178 .features[FEAT_7_0_EDX] =
2179 CPUID_7_0_EDX_SPEC_CTRL,
2180 .features[FEAT_7_0_EBX] =
2181 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2182 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2183 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2184 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2185 CPUID_7_0_EBX_SMAP,
2186 .features[FEAT_XSAVE] =
2187 CPUID_XSAVE_XSAVEOPT,
2188 .features[FEAT_6_EAX] =
2189 CPUID_6_EAX_ARAT,
2190 .xlevel = 0x80000008,
2191 .model_id = "Intel Core Processor (Broadwell, IBRS)",
2192 },
2193 {
2194 .name = "Skylake-Client",
2195 .level = 0xd,
2196 .vendor = CPUID_VENDOR_INTEL,
2197 .family = 6,
2198 .model = 94,
2199 .stepping = 3,
2200 .features[FEAT_1_EDX] =
2201 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2202 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2203 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2204 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2205 CPUID_DE | CPUID_FP87,
2206 .features[FEAT_1_ECX] =
2207 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2208 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2209 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2210 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2211 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2212 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2213 .features[FEAT_8000_0001_EDX] =
2214 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2215 CPUID_EXT2_SYSCALL,
2216 .features[FEAT_8000_0001_ECX] =
2217 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2218 .features[FEAT_7_0_EBX] =
2219 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2220 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2221 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2222 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2223 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2224 /* Missing: XSAVES (not supported by some Linux versions,
2225 * including v4.1 to v4.12).
2226 * KVM doesn't yet expose any XSAVES state save component,
2227 * and the only one defined in Skylake (processor tracing)
2228 * probably will block migration anyway.
2229 */
2230 .features[FEAT_XSAVE] =
2231 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2232 CPUID_XSAVE_XGETBV1,
2233 .features[FEAT_6_EAX] =
2234 CPUID_6_EAX_ARAT,
2235 .xlevel = 0x80000008,
2236 .model_id = "Intel Core Processor (Skylake)",
2237 },
2238 {
2239 .name = "Skylake-Client-IBRS",
2240 .level = 0xd,
2241 .vendor = CPUID_VENDOR_INTEL,
2242 .family = 6,
2243 .model = 94,
2244 .stepping = 3,
2245 .features[FEAT_1_EDX] =
2246 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2247 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2248 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2249 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2250 CPUID_DE | CPUID_FP87,
2251 .features[FEAT_1_ECX] =
2252 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2253 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2254 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2255 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2256 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2257 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2258 .features[FEAT_8000_0001_EDX] =
2259 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2260 CPUID_EXT2_SYSCALL,
2261 .features[FEAT_8000_0001_ECX] =
2262 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2263 .features[FEAT_7_0_EDX] =
2264 CPUID_7_0_EDX_SPEC_CTRL,
2265 .features[FEAT_7_0_EBX] =
2266 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2267 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2268 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2269 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2270 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2271 /* Missing: XSAVES (not supported by some Linux versions,
2272 * including v4.1 to v4.12).
2273 * KVM doesn't yet expose any XSAVES state save component,
2274 * and the only one defined in Skylake (processor tracing)
2275 * probably will block migration anyway.
2276 */
2277 .features[FEAT_XSAVE] =
2278 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2279 CPUID_XSAVE_XGETBV1,
2280 .features[FEAT_6_EAX] =
2281 CPUID_6_EAX_ARAT,
2282 .xlevel = 0x80000008,
2283 .model_id = "Intel Core Processor (Skylake, IBRS)",
2284 },
2285 {
2286 .name = "Skylake-Server",
2287 .level = 0xd,
2288 .vendor = CPUID_VENDOR_INTEL,
2289 .family = 6,
2290 .model = 85,
2291 .stepping = 4,
2292 .features[FEAT_1_EDX] =
2293 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2294 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2295 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2296 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2297 CPUID_DE | CPUID_FP87,
2298 .features[FEAT_1_ECX] =
2299 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2300 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2301 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2302 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2303 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2304 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2305 .features[FEAT_8000_0001_EDX] =
2306 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2307 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2308 .features[FEAT_8000_0001_ECX] =
2309 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2310 .features[FEAT_7_0_EBX] =
2311 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2312 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2313 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2314 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2315 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2316 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2317 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2318 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2319 /* Missing: XSAVES (not supported by some Linux versions,
2320 * including v4.1 to v4.12).
2321 * KVM doesn't yet expose any XSAVES state save component,
2322 * and the only one defined in Skylake (processor tracing)
2323 * probably will block migration anyway.
2324 */
2325 .features[FEAT_XSAVE] =
2326 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2327 CPUID_XSAVE_XGETBV1,
2328 .features[FEAT_6_EAX] =
2329 CPUID_6_EAX_ARAT,
2330 .xlevel = 0x80000008,
2331 .model_id = "Intel Xeon Processor (Skylake)",
2332 },
2333 {
2334 .name = "Skylake-Server-IBRS",
2335 .level = 0xd,
2336 .vendor = CPUID_VENDOR_INTEL,
2337 .family = 6,
2338 .model = 85,
2339 .stepping = 4,
2340 .features[FEAT_1_EDX] =
2341 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2342 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2343 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2344 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2345 CPUID_DE | CPUID_FP87,
2346 .features[FEAT_1_ECX] =
2347 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2348 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2349 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2350 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2351 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2352 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2353 .features[FEAT_8000_0001_EDX] =
2354 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2355 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2356 .features[FEAT_8000_0001_ECX] =
2357 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2358 .features[FEAT_7_0_EDX] =
2359 CPUID_7_0_EDX_SPEC_CTRL,
2360 .features[FEAT_7_0_EBX] =
2361 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2362 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2363 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2364 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2365 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2366 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2367 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2368 CPUID_7_0_EBX_AVX512VL,
2369 /* Missing: XSAVES (not supported by some Linux versions,
2370 * including v4.1 to v4.12).
2371 * KVM doesn't yet expose any XSAVES state save component,
2372 * and the only one defined in Skylake (processor tracing)
2373 * probably will block migration anyway.
2374 */
2375 .features[FEAT_XSAVE] =
2376 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2377 CPUID_XSAVE_XGETBV1,
2378 .features[FEAT_6_EAX] =
2379 CPUID_6_EAX_ARAT,
2380 .xlevel = 0x80000008,
2381 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2382 },
2383 {
2384 .name = "Icelake-Client",
2385 .level = 0xd,
2386 .vendor = CPUID_VENDOR_INTEL,
2387 .family = 6,
2388 .model = 126,
2389 .stepping = 0,
2390 .features[FEAT_1_EDX] =
2391 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2392 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2393 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2394 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2395 CPUID_DE | CPUID_FP87,
2396 .features[FEAT_1_ECX] =
2397 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2398 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2399 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2400 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2401 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2402 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2403 .features[FEAT_8000_0001_EDX] =
2404 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2405 CPUID_EXT2_SYSCALL,
2406 .features[FEAT_8000_0001_ECX] =
2407 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2408 .features[FEAT_8000_0008_EBX] =
2409 CPUID_8000_0008_EBX_WBNOINVD,
2410 .features[FEAT_7_0_EBX] =
2411 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2412 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2413 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2414 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2415 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_INTEL_PT,
2416 .features[FEAT_7_0_ECX] =
2417 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2418 CPUID_7_0_ECX_OSPKE | CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2419 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2420 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2421 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2422 .features[FEAT_7_0_EDX] =
2423 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2424 /* Missing: XSAVES (not supported by some Linux versions,
2425 * including v4.1 to v4.12).
2426 * KVM doesn't yet expose any XSAVES state save component,
2427 * and the only one defined in Skylake (processor tracing)
2428 * probably will block migration anyway.
2429 */
2430 .features[FEAT_XSAVE] =
2431 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2432 CPUID_XSAVE_XGETBV1,
2433 .features[FEAT_6_EAX] =
2434 CPUID_6_EAX_ARAT,
2435 .xlevel = 0x80000008,
2436 .model_id = "Intel Core Processor (Icelake)",
2437 },
2438 {
2439 .name = "Icelake-Server",
2440 .level = 0xd,
2441 .vendor = CPUID_VENDOR_INTEL,
2442 .family = 6,
2443 .model = 134,
2444 .stepping = 0,
2445 .features[FEAT_1_EDX] =
2446 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2447 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2448 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2449 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2450 CPUID_DE | CPUID_FP87,
2451 .features[FEAT_1_ECX] =
2452 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2453 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2454 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2455 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2456 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2457 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2458 .features[FEAT_8000_0001_EDX] =
2459 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2460 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2461 .features[FEAT_8000_0001_ECX] =
2462 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2463 .features[FEAT_8000_0008_EBX] =
2464 CPUID_8000_0008_EBX_WBNOINVD,
2465 .features[FEAT_7_0_EBX] =
2466 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2467 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2468 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2469 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2470 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2471 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2472 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2473 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT |
2474 CPUID_7_0_EBX_INTEL_PT,
2475 .features[FEAT_7_0_ECX] =
2476 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2477 CPUID_7_0_ECX_OSPKE | CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2478 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2479 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2480 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
2481 .features[FEAT_7_0_EDX] =
2482 CPUID_7_0_EDX_PCONFIG | CPUID_7_0_EDX_SPEC_CTRL |
2483 CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2484 /* Missing: XSAVES (not supported by some Linux versions,
2485 * including v4.1 to v4.12).
2486 * KVM doesn't yet expose any XSAVES state save component,
2487 * and the only one defined in Skylake (processor tracing)
2488 * probably will block migration anyway.
2489 */
2490 .features[FEAT_XSAVE] =
2491 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2492 CPUID_XSAVE_XGETBV1,
2493 .features[FEAT_6_EAX] =
2494 CPUID_6_EAX_ARAT,
2495 .xlevel = 0x80000008,
2496 .model_id = "Intel Xeon Processor (Icelake)",
2497 },
2498 {
2499 .name = "KnightsMill",
2500 .level = 0xd,
2501 .vendor = CPUID_VENDOR_INTEL,
2502 .family = 6,
2503 .model = 133,
2504 .stepping = 0,
2505 .features[FEAT_1_EDX] =
2506 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2507 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2508 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2509 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2510 CPUID_PSE | CPUID_DE | CPUID_FP87,
2511 .features[FEAT_1_ECX] =
2512 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2513 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2514 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2515 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2516 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2517 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2518 .features[FEAT_8000_0001_EDX] =
2519 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2520 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2521 .features[FEAT_8000_0001_ECX] =
2522 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2523 .features[FEAT_7_0_EBX] =
2524 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2525 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2526 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2527 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2528 CPUID_7_0_EBX_AVX512ER,
2529 .features[FEAT_7_0_ECX] =
2530 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2531 .features[FEAT_7_0_EDX] =
2532 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2533 .features[FEAT_XSAVE] =
2534 CPUID_XSAVE_XSAVEOPT,
2535 .features[FEAT_6_EAX] =
2536 CPUID_6_EAX_ARAT,
2537 .xlevel = 0x80000008,
2538 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2539 },
2540 {
2541 .name = "Opteron_G1",
2542 .level = 5,
2543 .vendor = CPUID_VENDOR_AMD,
2544 .family = 15,
2545 .model = 6,
2546 .stepping = 1,
2547 .features[FEAT_1_EDX] =
2548 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2549 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2550 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2551 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2552 CPUID_DE | CPUID_FP87,
2553 .features[FEAT_1_ECX] =
2554 CPUID_EXT_SSE3,
2555 .features[FEAT_8000_0001_EDX] =
2556 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2557 .xlevel = 0x80000008,
2558 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2559 },
2560 {
2561 .name = "Opteron_G2",
2562 .level = 5,
2563 .vendor = CPUID_VENDOR_AMD,
2564 .family = 15,
2565 .model = 6,
2566 .stepping = 1,
2567 .features[FEAT_1_EDX] =
2568 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2569 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2570 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2571 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2572 CPUID_DE | CPUID_FP87,
2573 .features[FEAT_1_ECX] =
2574 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2575 /* Missing: CPUID_EXT2_RDTSCP */
2576 .features[FEAT_8000_0001_EDX] =
2577 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2578 .features[FEAT_8000_0001_ECX] =
2579 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2580 .xlevel = 0x80000008,
2581 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2582 },
2583 {
2584 .name = "Opteron_G3",
2585 .level = 5,
2586 .vendor = CPUID_VENDOR_AMD,
2587 .family = 16,
2588 .model = 2,
2589 .stepping = 3,
2590 .features[FEAT_1_EDX] =
2591 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2592 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2593 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2594 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2595 CPUID_DE | CPUID_FP87,
2596 .features[FEAT_1_ECX] =
2597 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2598 CPUID_EXT_SSE3,
2599 /* Missing: CPUID_EXT2_RDTSCP */
2600 .features[FEAT_8000_0001_EDX] =
2601 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2602 .features[FEAT_8000_0001_ECX] =
2603 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2604 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2605 .xlevel = 0x80000008,
2606 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2607 },
2608 {
2609 .name = "Opteron_G4",
2610 .level = 0xd,
2611 .vendor = CPUID_VENDOR_AMD,
2612 .family = 21,
2613 .model = 1,
2614 .stepping = 2,
2615 .features[FEAT_1_EDX] =
2616 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2617 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2618 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2619 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2620 CPUID_DE | CPUID_FP87,
2621 .features[FEAT_1_ECX] =
2622 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2623 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2624 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2625 CPUID_EXT_SSE3,
2626 /* Missing: CPUID_EXT2_RDTSCP */
2627 .features[FEAT_8000_0001_EDX] =
2628 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2629 CPUID_EXT2_SYSCALL,
2630 .features[FEAT_8000_0001_ECX] =
2631 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2632 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2633 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2634 CPUID_EXT3_LAHF_LM,
2635 /* no xsaveopt! */
2636 .xlevel = 0x8000001A,
2637 .model_id = "AMD Opteron 62xx class CPU",
2638 },
2639 {
2640 .name = "Opteron_G5",
2641 .level = 0xd,
2642 .vendor = CPUID_VENDOR_AMD,
2643 .family = 21,
2644 .model = 2,
2645 .stepping = 0,
2646 .features[FEAT_1_EDX] =
2647 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2648 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2649 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2650 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2651 CPUID_DE | CPUID_FP87,
2652 .features[FEAT_1_ECX] =
2653 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2654 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2655 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2656 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2657 /* Missing: CPUID_EXT2_RDTSCP */
2658 .features[FEAT_8000_0001_EDX] =
2659 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2660 CPUID_EXT2_SYSCALL,
2661 .features[FEAT_8000_0001_ECX] =
2662 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2663 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2664 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2665 CPUID_EXT3_LAHF_LM,
2666 /* no xsaveopt! */
2667 .xlevel = 0x8000001A,
2668 .model_id = "AMD Opteron 63xx class CPU",
2669 },
2670 {
2671 .name = "EPYC",
2672 .level = 0xd,
2673 .vendor = CPUID_VENDOR_AMD,
2674 .family = 23,
2675 .model = 1,
2676 .stepping = 2,
2677 .features[FEAT_1_EDX] =
2678 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2679 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2680 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2681 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2682 CPUID_VME | CPUID_FP87,
2683 .features[FEAT_1_ECX] =
2684 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2685 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2686 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2687 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2688 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2689 .features[FEAT_8000_0001_EDX] =
2690 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2691 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2692 CPUID_EXT2_SYSCALL,
2693 .features[FEAT_8000_0001_ECX] =
2694 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2695 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2696 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2697 CPUID_EXT3_TOPOEXT,
2698 .features[FEAT_7_0_EBX] =
2699 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2700 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2701 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2702 CPUID_7_0_EBX_SHA_NI,
2703 /* Missing: XSAVES (not supported by some Linux versions,
2704 * including v4.1 to v4.12).
2705 * KVM doesn't yet expose any XSAVES state save component.
2706 */
2707 .features[FEAT_XSAVE] =
2708 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2709 CPUID_XSAVE_XGETBV1,
2710 .features[FEAT_6_EAX] =
2711 CPUID_6_EAX_ARAT,
2712 .xlevel = 0x8000001E,
2713 .model_id = "AMD EPYC Processor",
2714 .cache_info = &epyc_cache_info,
2715 },
2716 {
2717 .name = "EPYC-IBPB",
2718 .level = 0xd,
2719 .vendor = CPUID_VENDOR_AMD,
2720 .family = 23,
2721 .model = 1,
2722 .stepping = 2,
2723 .features[FEAT_1_EDX] =
2724 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2725 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2726 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2727 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2728 CPUID_VME | CPUID_FP87,
2729 .features[FEAT_1_ECX] =
2730 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2731 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2732 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2733 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2734 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2735 .features[FEAT_8000_0001_EDX] =
2736 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2737 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2738 CPUID_EXT2_SYSCALL,
2739 .features[FEAT_8000_0001_ECX] =
2740 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2741 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2742 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2743 CPUID_EXT3_TOPOEXT,
2744 .features[FEAT_8000_0008_EBX] =
2745 CPUID_8000_0008_EBX_IBPB,
2746 .features[FEAT_7_0_EBX] =
2747 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2748 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2749 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2750 CPUID_7_0_EBX_SHA_NI,
2751 /* Missing: XSAVES (not supported by some Linux versions,
2752 * including v4.1 to v4.12).
2753 * KVM doesn't yet expose any XSAVES state save component.
2754 */
2755 .features[FEAT_XSAVE] =
2756 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2757 CPUID_XSAVE_XGETBV1,
2758 .features[FEAT_6_EAX] =
2759 CPUID_6_EAX_ARAT,
2760 .xlevel = 0x8000001E,
2761 .model_id = "AMD EPYC Processor (with IBPB)",
2762 .cache_info = &epyc_cache_info,
2763 },
2764};
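/*
 * Illustrative note (not part of the original source): an entry from the
 * builtin_x86_defs table above is selected by name on the command line,
 * for example:
 *
 *     qemu-system-x86_64 -cpu Skylake-Server,-hle,-rtm
 *
 * Individual feature flags can be adjusted with +feat/-feat or feat=on|off;
 * the string after the model name is handled by x86_cpu_parse_featurestr()
 * later in this file.
 */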
2765
2766typedef struct PropValue {
2767 const char *prop, *value;
2768} PropValue;
2769
2770/* KVM-specific features that are automatically added/removed
2771 * from all CPU models when KVM is enabled.
2772 */
2773static PropValue kvm_default_props[] = {
2774 { "kvmclock", "on" },
2775 { "kvm-nopiodelay", "on" },
2776 { "kvm-asyncpf", "on" },
2777 { "kvm-steal-time", "on" },
2778 { "kvm-pv-eoi", "on" },
2779 { "kvmclock-stable-bit", "on" },
2780 { "x2apic", "on" },
2781 { "acpi", "off" },
2782 { "monitor", "off" },
2783 { "svm", "off" },
2784 { NULL, NULL },
2785};
2786
2787/* TCG-specific defaults that override all CPU models when using TCG
2788 */
2789static PropValue tcg_default_props[] = {
2790 { "vme", "off" },
2791 { NULL, NULL },
2792};
2793
2794
2795void x86_cpu_change_kvm_default(const char *prop, const char *value)
2796{
2797 PropValue *pv;
2798 for (pv = kvm_default_props; pv->prop; pv++) {
2799 if (!strcmp(pv->prop, prop)) {
2800 pv->value = value;
2801 break;
2802 }
2803 }
2804
2805 /* It is valid to call this function only for properties that
2806 * are already present in the kvm_default_props table.
2807 */
2808 assert(pv->prop);
2809}
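/*
 * Usage sketch (illustrative, not part of the original source): board or
 * machine-type code can tweak one of the defaults listed above, e.g.
 *
 *     x86_cpu_change_kvm_default("x2apic", "off");
 *
 * Passing a property name that is not already in kvm_default_props trips the
 * assert() above, so only the listed defaults can be changed this way.
 */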
2810
2811static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2812 bool migratable_only);
2813
2814static bool lmce_supported(void)
2815{
2816 uint64_t mce_cap = 0;
2817
2818#ifdef CONFIG_KVM
2819 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2820 return false;
2821 }
2822#endif
2823
2824 return !!(mce_cap & MCG_LMCE_P);
2825}
2826
2827#define CPUID_MODEL_ID_SZ 48
2828
2829/**
2830 * cpu_x86_fill_model_id:
2831 * Get CPUID model ID string from host CPU.
2832 *
2833 * @str should have at least CPUID_MODEL_ID_SZ bytes
2834 *
2835 * The function does NOT add a null terminator to the string
2836 * automatically.
2837 */
2838static int cpu_x86_fill_model_id(char *str)
2839{
2840 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2841 int i;
2842
2843 for (i = 0; i < 3; i++) {
2844 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2845 memcpy(str + i * 16 + 0, &eax, 4);
2846 memcpy(str + i * 16 + 4, &ebx, 4);
2847 memcpy(str + i * 16 + 8, &ecx, 4);
2848 memcpy(str + i * 16 + 12, &edx, 4);
2849 }
2850 return 0;
2851}
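/*
 * Usage sketch (illustrative, not part of the original source): the brand
 * string is read 16 bytes at a time from CPUID leaves 0x80000002..0x80000004,
 * so callers supply a buffer of CPUID_MODEL_ID_SZ (48) bytes plus room for
 * the terminator they add themselves, as max_x86_cpu_initfn() does below:
 *
 *     char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
 *     cpu_x86_fill_model_id(model_id);
 */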
2852
2853static Property max_x86_cpu_properties[] = {
2854 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2855 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2856 DEFINE_PROP_END_OF_LIST()
2857};
2858
2859static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2860{
2861 DeviceClass *dc = DEVICE_CLASS(oc);
2862 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2863
2864 xcc->ordering = 9;
2865
2866 xcc->model_description =
2867 "Enables all features supported by the accelerator in the current host";
2868
2869 dc->props = max_x86_cpu_properties;
2870}
2871
2872static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2873
2874static void max_x86_cpu_initfn(Object *obj)
2875{
2876 X86CPU *cpu = X86_CPU(obj);
2877 CPUX86State *env = &cpu->env;
2878 KVMState *s = kvm_state;
2879
2880 /* We can't fill the features array here because we don't know yet if
2881 * "migratable" is true or false.
2882 */
2883 cpu->max_features = true;
2884
2885 if (accel_uses_host_cpuid()) {
2886 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2887 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2888 int family, model, stepping;
2889 X86CPUDefinition host_cpudef = { };
2890 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2891
2892 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2893 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2894
2895 host_vendor_fms(vendor, &family, &model, &stepping);
2896
2897 cpu_x86_fill_model_id(model_id);
2898
2899 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2900 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2901 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2902 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2903 &error_abort);
2904 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2905 &error_abort);
2906
2907 if (kvm_enabled()) {
2908 env->cpuid_min_level =
2909 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2910 env->cpuid_min_xlevel =
2911 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2912 env->cpuid_min_xlevel2 =
2913 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2914 } else {
2915 env->cpuid_min_level =
2916 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2917 env->cpuid_min_xlevel =
2918 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2919 env->cpuid_min_xlevel2 =
2920 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2921 }
2922
2923 if (lmce_supported()) {
2924 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2925 }
2926 } else {
2927 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2928 "vendor", &error_abort);
2929 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2930 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2931 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2932 object_property_set_str(OBJECT(cpu),
2933 "QEMU TCG CPU version " QEMU_HW_VERSION,
2934 "model-id", &error_abort);
2935 }
2936
2937 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
2938}
2939
2940static const TypeInfo max_x86_cpu_type_info = {
2941 .name = X86_CPU_TYPE_NAME("max"),
2942 .parent = TYPE_X86_CPU,
2943 .instance_init = max_x86_cpu_initfn,
2944 .class_init = max_x86_cpu_class_init,
2945};
2946
2947#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2948static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2949{
2950 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2951
2952 xcc->host_cpuid_required = true;
2953 xcc->ordering = 8;
2954
2955#if defined(CONFIG_KVM)
2956 xcc->model_description =
2957 "KVM processor with all supported host features ";
2958#elif defined(CONFIG_HVF)
2959 xcc->model_description =
2960 "HVF processor with all supported host features ";
2961#endif
2962}
2963
2964static const TypeInfo host_x86_cpu_type_info = {
2965 .name = X86_CPU_TYPE_NAME("host"),
2966 .parent = X86_CPU_TYPE_NAME("max"),
2967 .class_init = host_x86_cpu_class_init,
2968};
2969
2970#endif
2971
2972static void report_unavailable_features(FeatureWord w, uint32_t mask)
2973{
2974 FeatureWordInfo *f = &feature_word_info[w];
2975 int i;
2976
2977 for (i = 0; i < 32; ++i) {
2978 if ((1UL << i) & mask) {
2979 const char *reg = get_register_name_32(f->cpuid_reg);
2980 assert(reg);
2981 warn_report("%s doesn't support requested feature: "
2982 "CPUID.%02XH:%s%s%s [bit %d]",
2983 accel_uses_host_cpuid() ? "host" : "TCG",
2984 f->cpuid_eax, reg,
2985 f->feat_names[i] ? "." : "",
2986 f->feat_names[i] ? f->feat_names[i] : "", i);
2987 }
2988 }
2989}
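/*
 * Example (illustrative, not part of the original source): requesting x2apic
 * (CPUID.01H:ECX bit 21) under plain TCG yields a warning whose message text
 * is built as
 *
 *     "TCG doesn't support requested feature: CPUID.01H:ECX.x2apic [bit 21]"
 */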
2990
2991static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2992 const char *name, void *opaque,
2993 Error **errp)
2994{
2995 X86CPU *cpu = X86_CPU(obj);
2996 CPUX86State *env = &cpu->env;
2997 int64_t value;
2998
2999 value = (env->cpuid_version >> 8) & 0xf;
3000 if (value == 0xf) {
3001 value += (env->cpuid_version >> 20) & 0xff;
3002 }
3003 visit_type_int(v, name, &value, errp);
3004}
3005
3006static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
3007 const char *name, void *opaque,
3008 Error **errp)
3009{
3010 X86CPU *cpu = X86_CPU(obj);
3011 CPUX86State *env = &cpu->env;
3012 const int64_t min = 0;
3013 const int64_t max = 0xff + 0xf;
3014 Error *local_err = NULL;
3015 int64_t value;
3016
3017 visit_type_int(v, name, &value, &local_err);
3018 if (local_err) {
3019 error_propagate(errp, local_err);
3020 return;
3021 }
3022 if (value < min || value > max) {
3023 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3024 name ? name : "null", value, min, max);
3025 return;
3026 }
3027
3028 env->cpuid_version &= ~0xff00f00;
3029 if (value > 0x0f) {
3030 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
3031 } else {
3032 env->cpuid_version |= value << 8;
3033 }
3034}
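/*
 * Worked example (illustrative, not part of the original source): CPUID.01H
 * EAX keeps the base family in bits 11:8 and the extended family in bits
 * 27:20.  Setting family=21 (the value used by the Opteron_G4/G5 models
 * above) therefore stores 0xF in the base field and 21 - 15 = 6 in the
 * extended field, i.e. cpuid_version |= 0xF00 | (6 << 20).
 */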
3035
3036static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
3037 const char *name, void *opaque,
3038 Error **errp)
3039{
3040 X86CPU *cpu = X86_CPU(obj);
3041 CPUX86State *env = &cpu->env;
3042 int64_t value;
3043
3044 value = (env->cpuid_version >> 4) & 0xf;
3045 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
3046 visit_type_int(v, name, &value, errp);
3047}
3048
3049static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
3050 const char *name, void *opaque,
3051 Error **errp)
3052{
3053 X86CPU *cpu = X86_CPU(obj);
3054 CPUX86State *env = &cpu->env;
3055 const int64_t min = 0;
3056 const int64_t max = 0xff;
3057 Error *local_err = NULL;
3058 int64_t value;
3059
3060 visit_type_int(v, name, &value, &local_err);
3061 if (local_err) {
3062 error_propagate(errp, local_err);
3063 return;
3064 }
3065 if (value < min || value > max) {
3066 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3067 name ? name : "null", value, min, max);
3068 return;
3069 }
3070
3071 env->cpuid_version &= ~0xf00f0;
3072 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
3073}
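/*
 * Worked example (illustrative, not part of the original source): the model
 * number is split between bits 7:4 (model) and bits 19:16 (extended model).
 * Setting model=58 (0x3A, as in the IvyBridge definition above) stores 0xA
 * in bits 7:4 and 0x3 in bits 19:16.
 */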
3074
3075static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
3076 const char *name, void *opaque,
3077 Error **errp)
3078{
3079 X86CPU *cpu = X86_CPU(obj);
3080 CPUX86State *env = &cpu->env;
3081 int64_t value;
3082
3083 value = env->cpuid_version & 0xf;
3084 visit_type_int(v, name, &value, errp);
3085}
3086
3087static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
3088 const char *name, void *opaque,
3089 Error **errp)
3090{
3091 X86CPU *cpu = X86_CPU(obj);
3092 CPUX86State *env = &cpu->env;
3093 const int64_t min = 0;
3094 const int64_t max = 0xf;
3095 Error *local_err = NULL;
3096 int64_t value;
3097
3098 visit_type_int(v, name, &value, &local_err);
3099 if (local_err) {
3100 error_propagate(errp, local_err);
3101 return;
3102 }
3103 if (value < min || value > max) {
3104 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3105 name ? name : "null", value, min, max);
3106 return;
3107 }
3108
3109 env->cpuid_version &= ~0xf;
3110 env->cpuid_version |= value & 0xf;
3111}
3112
3113static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
3114{
3115 X86CPU *cpu = X86_CPU(obj);
3116 CPUX86State *env = &cpu->env;
3117 char *value;
3118
3119 value = g_malloc(CPUID_VENDOR_SZ + 1);
3120 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
3121 env->cpuid_vendor3);
3122 return value;
3123}
3124
3125static void x86_cpuid_set_vendor(Object *obj, const char *value,
3126 Error **errp)
3127{
3128 X86CPU *cpu = X86_CPU(obj);
3129 CPUX86State *env = &cpu->env;
3130 int i;
3131
3132 if (strlen(value) != CPUID_VENDOR_SZ) {
3133 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
3134 return;
3135 }
3136
3137 env->cpuid_vendor1 = 0;
3138 env->cpuid_vendor2 = 0;
3139 env->cpuid_vendor3 = 0;
3140 for (i = 0; i < 4; i++) {
3141 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3142 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3143 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3144 }
3145}
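/*
 * Worked example (illustrative, not part of the original source): the
 * 12-character vendor string is packed four bytes at a time, little endian,
 * into the three CPUID.0 registers.  "GenuineIntel" becomes
 * cpuid_vendor1 = "Genu" (EBX), cpuid_vendor2 = "ineI" (EDX) and
 * cpuid_vendor3 = "ntel" (ECX), the reverse of what
 * x86_cpu_vendor_words2str() does in the getter above.
 */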
3146
3147static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3148{
3149 X86CPU *cpu = X86_CPU(obj);
3150 CPUX86State *env = &cpu->env;
3151 char *value;
3152 int i;
3153
3154 value = g_malloc(48 + 1);
3155 for (i = 0; i < 48; i++) {
3156 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3157 }
3158 value[48] = '\0';
3159 return value;
3160}
3161
3162static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3163 Error **errp)
3164{
3165 X86CPU *cpu = X86_CPU(obj);
3166 CPUX86State *env = &cpu->env;
3167 int c, len, i;
3168
3169 if (model_id == NULL) {
3170 model_id = "";
3171 }
3172 len = strlen(model_id);
3173 memset(env->cpuid_model, 0, 48);
3174 for (i = 0; i < 48; i++) {
3175 if (i >= len) {
3176 c = '\0';
3177 } else {
3178 c = (uint8_t)model_id[i];
3179 }
3180 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3181 }
3182}
3183
3184static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3185 void *opaque, Error **errp)
3186{
3187 X86CPU *cpu = X86_CPU(obj);
3188 int64_t value;
3189
3190 value = cpu->env.tsc_khz * 1000;
3191 visit_type_int(v, name, &value, errp);
3192}
3193
3194static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3195 void *opaque, Error **errp)
3196{
3197 X86CPU *cpu = X86_CPU(obj);
3198 const int64_t min = 0;
3199 const int64_t max = INT64_MAX;
3200 Error *local_err = NULL;
3201 int64_t value;
3202
3203 visit_type_int(v, name, &value, &local_err);
3204 if (local_err) {
3205 error_propagate(errp, local_err);
3206 return;
3207 }
3208 if (value < min || value > max) {
3209 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3210 name ? name : "null", value, min, max);
3211 return;
3212 }
3213
3214 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
3215}
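/*
 * Note (illustrative, not part of the original source): the value visited
 * here is in Hz while the internal fields are kept in kHz, so setting the
 * property to 2000000000 (2 GHz) stores tsc_khz = user_tsc_khz = 2000000.
 */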
3216
3217/* Generic getter for "feature-words" and "filtered-features" properties */
3218static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
3219 const char *name, void *opaque,
3220 Error **errp)
3221{
3222 uint32_t *array = (uint32_t *)opaque;
3223 FeatureWord w;
3224 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
3225 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
3226 X86CPUFeatureWordInfoList *list = NULL;
3227
3228 for (w = 0; w < FEATURE_WORDS; w++) {
3229 FeatureWordInfo *wi = &feature_word_info[w];
3230 X86CPUFeatureWordInfo *qwi = &word_infos[w];
3231 qwi->cpuid_input_eax = wi->cpuid_eax;
3232 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
3233 qwi->cpuid_input_ecx = wi->cpuid_ecx;
3234 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
3235 qwi->features = array[w];
3236
3237 /* List will be in reverse order, but order shouldn't matter */
3238 list_entries[w].next = list;
3239 list_entries[w].value = &word_infos[w];
3240 list = &list_entries[w];
3241 }
3242
3243 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
3244}
3245
3246static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3247 void *opaque, Error **errp)
3248{
3249 X86CPU *cpu = X86_CPU(obj);
3250 int64_t value = cpu->hyperv_spinlock_attempts;
3251
3252 visit_type_int(v, name, &value, errp);
3253}
3254
3255static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3256 void *opaque, Error **errp)
3257{
3258 const int64_t min = 0xFFF;
3259 const int64_t max = UINT_MAX;
3260 X86CPU *cpu = X86_CPU(obj);
3261 Error *err = NULL;
3262 int64_t value;
3263
3264 visit_type_int(v, name, &value, &err);
3265 if (err) {
3266 error_propagate(errp, err);
3267 return;
3268 }
3269
3270 if (value < min || value > max) {
3271 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
3272 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
3273 object_get_typename(obj), name ? name : "null",
3274 value, min, max);
3275 return;
3276 }
3277 cpu->hyperv_spinlock_attempts = value;
3278}
3279
3280static const PropertyInfo qdev_prop_spinlocks = {
3281 .name = "int",
3282 .get = x86_get_hv_spinlocks,
3283 .set = x86_set_hv_spinlocks,
3284};
3285
3286/* Convert all '_' in a feature string option name to '-', to make feature
3287 * name conform to QOM property naming rule, which uses '-' instead of '_'.
3288 */
3289static inline void feat2prop(char *s)
3290{
3291 while ((s = strchr(s, '_'))) {
3292 *s = '-';
3293 }
3294}
3295
3296/* Return the feature property name for a feature flag bit */
3297static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
3298{
3299 /* XSAVE components are automatically enabled by other features,
3300 * so return the original feature name instead
3301 */
3302 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
3303 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
3304
3305 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
3306 x86_ext_save_areas[comp].bits) {
3307 w = x86_ext_save_areas[comp].feature;
3308 bitnr = ctz32(x86_ext_save_areas[comp].bits);
3309 }
3310 }
3311
3312 assert(bitnr < 32);
3313 assert(w < FEATURE_WORDS);
3314 return feature_word_info[w].feat_names[bitnr];
3315}
3316
3317/* Compatibility hack to maintain legacy +-feat semantics,
3318 * where +-feat overwrites any feature set by
3319 * feat=on|feat even if the latter is parsed after +-feat
3320 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
3321 */
3322static GList *plus_features, *minus_features;
3323
3324static gint compare_string(gconstpointer a, gconstpointer b)
3325{
3326 return g_strcmp0(a, b);
3327}
3328
3329/* Parse "+feature,-feature,feature=foo" CPU feature string
3330 */
3331static void x86_cpu_parse_featurestr(const char *typename, char *features,
3332 Error **errp)
3333{
3334    char *featurestr; /* Single "key=value" string being parsed */
3335 static bool cpu_globals_initialized;
3336 bool ambiguous = false;
3337
3338 if (cpu_globals_initialized) {
3339 return;
3340 }
3341 cpu_globals_initialized = true;
3342
3343 if (!features) {
3344 return;
3345 }
3346
3347 for (featurestr = strtok(features, ",");
3348 featurestr;
3349 featurestr = strtok(NULL, ",")) {
3350 const char *name;
3351 const char *val = NULL;
3352 char *eq = NULL;
3353 char num[32];
3354 GlobalProperty *prop;
3355
3356 /* Compatibility syntax: */
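        /*
         * e.g. "+avx2" or "-vmx": these are queued and applied later by
         * x86_cpu_expand_features(), so they override any "feat=on|off"
         * option regardless of where it appears in the string.
         */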
3357 if (featurestr[0] == '+') {
3358 plus_features = g_list_append(plus_features,
3359 g_strdup(featurestr + 1));
3360 continue;
3361 } else if (featurestr[0] == '-') {
3362 minus_features = g_list_append(minus_features,
3363 g_strdup(featurestr + 1));
3364 continue;
3365 }
3366
3367 eq = strchr(featurestr, '=');
3368 if (eq) {
3369 *eq++ = 0;
3370 val = eq;
3371 } else {
3372 val = "on";
3373 }
3374
3375 feat2prop(featurestr);
3376 name = featurestr;
3377
3378 if (g_list_find_custom(plus_features, name, compare_string)) {
3379 warn_report("Ambiguous CPU model string. "
3380 "Don't mix both \"+%s\" and \"%s=%s\"",
3381 name, name, val);
3382 ambiguous = true;
3383 }
3384 if (g_list_find_custom(minus_features, name, compare_string)) {
3385 warn_report("Ambiguous CPU model string. "
3386 "Don't mix both \"-%s\" and \"%s=%s\"",
3387 name, name, val);
3388 ambiguous = true;
3389 }
3390
3391 /* Special case: */
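        /*
         * e.g. "tsc-freq=2.5G" is parsed with metric (power-of-10) suffixes
         * and rewritten as the "tsc-frequency" property
         * ("tsc-frequency=2500000000").
         */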
3392 if (!strcmp(name, "tsc-freq")) {
3393 int ret;
3394 uint64_t tsc_freq;
3395
3396 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
3397 if (ret < 0 || tsc_freq > INT64_MAX) {
3398 error_setg(errp, "bad numerical value %s", val);
3399 return;
3400 }
3401 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
3402 val = num;
3403 name = "tsc-frequency";
3404 }
3405
3406 prop = g_new0(typeof(*prop), 1);
3407 prop->driver = typename;
3408 prop->property = g_strdup(name);
3409 prop->value = g_strdup(val);
3410 prop->errp = &error_fatal;
3411 qdev_prop_register_global(prop);
3412 }
3413
3414 if (ambiguous) {
3415        warn_report("Compatibility of ambiguous CPU model "
3416                    "strings won't be kept in future QEMU versions");
3417 }
3418}
3419
3420static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3421static int x86_cpu_filter_features(X86CPU *cpu);
3422
3423/* Check for missing features that may prevent the CPU class from
3424 * running using the current machine and accelerator.
3425 */
3426static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3427 strList **missing_feats)
3428{
3429 X86CPU *xc;
3430 FeatureWord w;
3431 Error *err = NULL;
3432 strList **next = missing_feats;
3433
3434 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3435 strList *new = g_new0(strList, 1);
3436 new->value = g_strdup("kvm");
3437 *missing_feats = new;
3438 return;
3439 }
3440
3441 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3442
3443 x86_cpu_expand_features(xc, &err);
3444 if (err) {
3445 /* Errors at x86_cpu_expand_features should never happen,
3446         * but in case they do, just report the model as not
3447 * runnable at all using the "type" property.
3448 */
3449 strList *new = g_new0(strList, 1);
3450 new->value = g_strdup("type");
3451 *next = new;
3452 next = &new->next;
3453 }
3454
3455 x86_cpu_filter_features(xc);
3456
3457 for (w = 0; w < FEATURE_WORDS; w++) {
3458 uint32_t filtered = xc->filtered_features[w];
3459 int i;
3460 for (i = 0; i < 32; i++) {
3461 if (filtered & (1UL << i)) {
3462 strList *new = g_new0(strList, 1);
3463 new->value = g_strdup(x86_cpu_feature_name(w, i));
3464 *next = new;
3465 next = &new->next;
3466 }
3467 }
3468 }
3469
3470 object_unref(OBJECT(xc));
3471}
3472
3473/* Print all CPUID feature names in the features list
3474 */
3475static void listflags(FILE *f, fprintf_function print, GList *features)
3476{
3477 size_t len = 0;
3478 GList *tmp;
3479
3480 for (tmp = features; tmp; tmp = tmp->next) {
3481 const char *name = tmp->data;
3482 if ((len + strlen(name) + 1) >= 75) {
3483 print(f, "\n");
3484 len = 0;
3485 }
3486 print(f, "%s%s", len == 0 ? " " : " ", name);
3487 len += strlen(name) + 1;
3488 }
3489 print(f, "\n");
3490}
3491
3492/* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3493static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3494{
3495 ObjectClass *class_a = (ObjectClass *)a;
3496 ObjectClass *class_b = (ObjectClass *)b;
3497 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3498 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3499 char *name_a, *name_b;
3500 int ret;
3501
3502 if (cc_a->ordering != cc_b->ordering) {
3503 ret = cc_a->ordering - cc_b->ordering;
3504 } else {
3505 name_a = x86_cpu_class_get_model_name(cc_a);
3506 name_b = x86_cpu_class_get_model_name(cc_b);
3507 ret = strcmp(name_a, name_b);
3508 g_free(name_a);
3509 g_free(name_b);
3510 }
3511 return ret;
3512}
3513
3514static GSList *get_sorted_cpu_model_list(void)
3515{
3516 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3517 list = g_slist_sort(list, x86_cpu_list_compare);
3518 return list;
3519}
3520
3521static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3522{
3523 ObjectClass *oc = data;
3524 X86CPUClass *cc = X86_CPU_CLASS(oc);
3525 CPUListState *s = user_data;
3526 char *name = x86_cpu_class_get_model_name(cc);
3527 const char *desc = cc->model_description;
3528 if (!desc && cc->cpu_def) {
3529 desc = cc->cpu_def->model_id;
3530 }
3531
3532 (*s->cpu_fprintf)(s->file, "x86 %-20s %-48s\n",
3533 name, desc);
3534 g_free(name);
3535}
3536
3537/* list available CPU models and flags */
3538void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3539{
3540 int i, j;
3541 CPUListState s = {
3542 .file = f,
3543 .cpu_fprintf = cpu_fprintf,
3544 };
3545 GSList *list;
3546 GList *names = NULL;
3547
3548 (*cpu_fprintf)(f, "Available CPUs:\n");
3549 list = get_sorted_cpu_model_list();
3550 g_slist_foreach(list, x86_cpu_list_entry, &s);
3551 g_slist_free(list);
3552
3553 names = NULL;
3554 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3555 FeatureWordInfo *fw = &feature_word_info[i];
3556 for (j = 0; j < 32; j++) {
3557 if (fw->feat_names[j]) {
3558 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3559 }
3560 }
3561 }
3562
3563 names = g_list_sort(names, (GCompareFunc)strcmp);
3564
3565 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
3566 listflags(f, cpu_fprintf, names);
3567 (*cpu_fprintf)(f, "\n");
3568 g_list_free(names);
3569}
3570
3571static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3572{
3573 ObjectClass *oc = data;
3574 X86CPUClass *cc = X86_CPU_CLASS(oc);
3575 CpuDefinitionInfoList **cpu_list = user_data;
3576 CpuDefinitionInfoList *entry;
3577 CpuDefinitionInfo *info;
3578
3579 info = g_malloc0(sizeof(*info));
3580 info->name = x86_cpu_class_get_model_name(cc);
3581 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3582 info->has_unavailable_features = true;
3583 info->q_typename = g_strdup(object_class_get_name(oc));
3584 info->migration_safe = cc->migration_safe;
3585 info->has_migration_safe = true;
3586 info->q_static = cc->static_model;
3587
3588 entry = g_malloc0(sizeof(*entry));
3589 entry->value = info;
3590 entry->next = *cpu_list;
3591 *cpu_list = entry;
3592}
3593
3594CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
3595{
3596 CpuDefinitionInfoList *cpu_list = NULL;
3597 GSList *list = get_sorted_cpu_model_list();
3598 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3599 g_slist_free(list);
3600 return cpu_list;
3601}
3602
3603static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3604 bool migratable_only)
3605{
3606 FeatureWordInfo *wi = &feature_word_info[w];
3607 uint32_t r;
3608
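    /*
     * Ask the active accelerator which bits of this feature word it can
     * actually provide; with no accelerator-specific data, report
     * everything as supported.
     */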
3609 if (kvm_enabled()) {
3610 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
3611 wi->cpuid_ecx,
3612 wi->cpuid_reg);
3613 } else if (hvf_enabled()) {
3614 r = hvf_get_supported_cpuid(wi->cpuid_eax,
3615 wi->cpuid_ecx,
3616 wi->cpuid_reg);
3617 } else if (tcg_enabled()) {
3618 r = wi->tcg_features;
3619 } else {
3620 return ~0;
3621 }
3622 if (migratable_only) {
3623 r &= x86_cpu_get_migratable_flags(w);
3624 }
3625 return r;
3626}
3627
3628static void x86_cpu_report_filtered_features(X86CPU *cpu)
3629{
3630 FeatureWord w;
3631
3632 for (w = 0; w < FEATURE_WORDS; w++) {
3633 report_unavailable_features(w, cpu->filtered_features[w]);
3634 }
3635}
3636
3637static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3638{
3639 PropValue *pv;
3640 for (pv = props; pv->prop; pv++) {
3641 if (!pv->value) {
3642 continue;
3643 }
3644 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3645 &error_abort);
3646 }
3647}
3648
3649/* Load data from X86CPUDefinition into an X86CPU object
3650 */
3651static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
3652{
3653 CPUX86State *env = &cpu->env;
3654 const char *vendor;
3655 char host_vendor[CPUID_VENDOR_SZ + 1];
3656 FeatureWord w;
3657
3658    /* NOTE: any property set by this function should be returned by
3659 * x86_cpu_static_props(), so static expansion of
3660 * query-cpu-model-expansion is always complete.
3661 */
3662
3663 /* CPU models only set _minimum_ values for level/xlevel: */
3664 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
3665 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
3666
3667 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
3668 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
3669 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
3670 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
3671 for (w = 0; w < FEATURE_WORDS; w++) {
3672 env->features[w] = def->features[w];
3673 }
3674
3675 /* legacy-cache defaults to 'off' if CPU model provides cache info */
3676 cpu->legacy_cache = !def->cache_info;
3677
3678 /* Special cases not set in the X86CPUDefinition structs: */
3679 /* TODO: in-kernel irqchip for hvf */
3680 if (kvm_enabled()) {
3681 if (!kvm_irqchip_in_kernel()) {
3682 x86_cpu_change_kvm_default("x2apic", "off");
3683 }
3684
3685 x86_cpu_apply_props(cpu, kvm_default_props);
3686 } else if (tcg_enabled()) {
3687 x86_cpu_apply_props(cpu, tcg_default_props);
3688 }
3689
3690 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
3691
3692 /* sysenter isn't supported in compatibility mode on AMD,
3693 * syscall isn't supported in compatibility mode on Intel.
3694 * Normally we advertise the actual CPU vendor, but you can
3695 * override this using the 'vendor' property if you want to use
3696 * KVM's sysenter/syscall emulation in compatibility mode and
3697     * when doing cross-vendor migration.
3698 */
3699 vendor = def->vendor;
3700 if (accel_uses_host_cpuid()) {
3701 uint32_t ebx = 0, ecx = 0, edx = 0;
3702 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
3703 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
3704 vendor = host_vendor;
3705 }
3706
3707 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
3708
3709}
3710
3711/* Return a QDict containing keys for all properties that can be included
3712 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3713 * must be included in the dictionary.
3714 */
3715static QDict *x86_cpu_static_props(void)
3716{
3717 FeatureWord w;
3718 int i;
3719 static const char *props[] = {
3720 "min-level",
3721 "min-xlevel",
3722 "family",
3723 "model",
3724 "stepping",
3725 "model-id",
3726 "vendor",
3727 "lmce",
3728 NULL,
3729 };
3730 static QDict *d;
3731
3732 if (d) {
3733 return d;
3734 }
3735
3736 d = qdict_new();
3737 for (i = 0; props[i]; i++) {
3738 qdict_put_null(d, props[i]);
3739 }
3740
3741 for (w = 0; w < FEATURE_WORDS; w++) {
3742 FeatureWordInfo *fi = &feature_word_info[w];
3743 int bit;
3744 for (bit = 0; bit < 32; bit++) {
3745 if (!fi->feat_names[bit]) {
3746 continue;
3747 }
3748 qdict_put_null(d, fi->feat_names[bit]);
3749 }
3750 }
3751
3752 return d;
3753}
3754
3755/* Add an entry to @props dict, with the value for property. */
3756static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3757{
3758 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3759 &error_abort);
3760
3761 qdict_put_obj(props, prop, value);
3762}
3763
3764/* Convert CPU model data from X86CPU object to a property dictionary
3765 * that can recreate exactly the same CPU model.
3766 */
3767static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3768{
3769 QDict *sprops = x86_cpu_static_props();
3770 const QDictEntry *e;
3771
3772 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3773 const char *prop = qdict_entry_key(e);
3774 x86_cpu_expand_prop(cpu, props, prop);
3775 }
3776}
3777
3778/* Convert CPU model data from X86CPU object to a property dictionary
3779 * that can recreate exactly the same CPU model, including every
3780 * writeable QOM property.
3781 */
3782static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3783{
3784 ObjectPropertyIterator iter;
3785 ObjectProperty *prop;
3786
3787 object_property_iter_init(&iter, OBJECT(cpu));
3788 while ((prop = object_property_iter_next(&iter))) {
3789 /* skip read-only or write-only properties */
3790 if (!prop->get || !prop->set) {
3791 continue;
3792 }
3793
3794 /* "hotplugged" is the only property that is configurable
3795 * on the command-line but will be set differently on CPUs
3796 * created using "-cpu ... -smp ..." and by CPUs created
3797 * on the fly by x86_cpu_from_model() for querying. Skip it.
3798 */
3799 if (!strcmp(prop->name, "hotplugged")) {
3800 continue;
3801 }
3802 x86_cpu_expand_prop(cpu, props, prop->name);
3803 }
3804}
3805
3806static void object_apply_props(Object *obj, QDict *props, Error **errp)
3807{
3808 const QDictEntry *prop;
3809 Error *err = NULL;
3810
3811 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3812 object_property_set_qobject(obj, qdict_entry_value(prop),
3813 qdict_entry_key(prop), &err);
3814 if (err) {
3815 break;
3816 }
3817 }
3818
3819 error_propagate(errp, err);
3820}
3821
3822/* Create X86CPU object according to model+props specification */
3823static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3824{
3825 X86CPU *xc = NULL;
3826 X86CPUClass *xcc;
3827 Error *err = NULL;
3828
3829 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3830 if (xcc == NULL) {
3831 error_setg(&err, "CPU model '%s' not found", model);
3832 goto out;
3833 }
3834
3835 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3836 if (props) {
3837 object_apply_props(OBJECT(xc), props, &err);
3838 if (err) {
3839 goto out;
3840 }
3841 }
3842
3843 x86_cpu_expand_features(xc, &err);
3844 if (err) {
3845 goto out;
3846 }
3847
3848out:
3849 if (err) {
3850 error_propagate(errp, err);
3851 object_unref(OBJECT(xc));
3852 xc = NULL;
3853 }
3854 return xc;
3855}
3856
3857CpuModelExpansionInfo *
3858arch_query_cpu_model_expansion(CpuModelExpansionType type,
3859 CpuModelInfo *model,
3860 Error **errp)
3861{
3862 X86CPU *xc = NULL;
3863 Error *err = NULL;
3864 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3865 QDict *props = NULL;
3866 const char *base_name;
3867
3868 xc = x86_cpu_from_model(model->name,
3869 model->has_props ?
3870 qobject_to(QDict, model->props) :
3871 NULL, &err);
3872 if (err) {
3873 goto out;
3874 }
3875
3876 props = qdict_new();
3877
3878 switch (type) {
3879 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3880 /* Static expansion will be based on "base" only */
3881 base_name = "base";
3882 x86_cpu_to_dict(xc, props);
3883 break;
3884 case CPU_MODEL_EXPANSION_TYPE_FULL:
3885 /* As we don't return every single property, full expansion needs
3886 * to keep the original model name+props, and add extra
3887 * properties on top of that.
3888 */
3889 base_name = model->name;
3890 x86_cpu_to_dict_full(xc, props);
3891 break;
3892 default:
3893        error_setg(&err, "Unsupported expansion type");
3894 goto out;
3895 }
3896
3897 if (!props) {
3898 props = qdict_new();
3899 }
3900 x86_cpu_to_dict(xc, props);
3901
3902 ret->model = g_new0(CpuModelInfo, 1);
3903 ret->model->name = g_strdup(base_name);
3904 ret->model->props = QOBJECT(props);
3905 ret->model->has_props = true;
3906
3907out:
3908 object_unref(OBJECT(xc));
3909 if (err) {
3910 error_propagate(errp, err);
3911 qapi_free_CpuModelExpansionInfo(ret);
3912 ret = NULL;
3913 }
3914 return ret;
3915}
3916
3917static gchar *x86_gdb_arch_name(CPUState *cs)
3918{
3919#ifdef TARGET_X86_64
3920 return g_strdup("i386:x86-64");
3921#else
3922 return g_strdup("i386");
3923#endif
3924}
3925
3926static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3927{
3928 X86CPUDefinition *cpudef = data;
3929 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3930
3931 xcc->cpu_def = cpudef;
3932 xcc->migration_safe = true;
3933}
3934
3935static void x86_register_cpudef_type(X86CPUDefinition *def)
3936{
3937 char *typename = x86_cpu_type_name(def->name);
3938 TypeInfo ti = {
3939 .name = typename,
3940 .parent = TYPE_X86_CPU,
3941 .class_init = x86_cpu_cpudef_class_init,
3942 .class_data = def,
3943 };
3944
3945 /* AMD aliases are handled at runtime based on CPUID vendor, so
3946     * they shouldn't be set in the CPU model table.
3947 */
3948 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3949 /* catch mistakes instead of silently truncating model_id when too long */
3950 assert(def->model_id && strlen(def->model_id) <= 48);
3951
3952
3953 type_register(&ti);
3954 g_free(typename);
3955}
3956
3957#if !defined(CONFIG_USER_ONLY)
3958
3959void cpu_clear_apic_feature(CPUX86State *env)
3960{
3961 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3962}
3963
3964#endif /* !CONFIG_USER_ONLY */
3965
3966void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3967 uint32_t *eax, uint32_t *ebx,
3968 uint32_t *ecx, uint32_t *edx)
3969{
3970 X86CPU *cpu = x86_env_get_cpu(env);
3971 CPUState *cs = CPU(cpu);
3972 uint32_t pkg_offset;
3973 uint32_t limit;
3974 uint32_t signature[3];
3975
3976 /* Calculate & apply limits for different index ranges */
3977 if (index >= 0xC0000000) {
3978 limit = env->cpuid_xlevel2;
3979 } else if (index >= 0x80000000) {
3980 limit = env->cpuid_xlevel;
3981 } else if (index >= 0x40000000) {
3982 limit = 0x40000001;
3983 } else {
3984 limit = env->cpuid_level;
3985 }
3986
3987 if (index > limit) {
3988 /* Intel documentation states that invalid EAX input will
3989 * return the same information as EAX=cpuid_level
3990 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3991 */
3992 index = env->cpuid_level;
3993 }
3994
3995 switch(index) {
3996 case 0:
3997 *eax = env->cpuid_level;
3998 *ebx = env->cpuid_vendor1;
3999 *edx = env->cpuid_vendor2;
4000 *ecx = env->cpuid_vendor3;
4001 break;
4002 case 1:
4003 *eax = env->cpuid_version;
4004 *ebx = (cpu->apic_id << 24) |
4005 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
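        /*
         * EBX layout: bits 31..24 initial APIC ID, bits 15..8 CLFLUSH line
         * size in 8-byte units (8 -> 64 bytes); bits 23..16 (logical
         * processor count) are filled in below for SMP configurations.
         */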
4006 *ecx = env->features[FEAT_1_ECX];
4007 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
4008 *ecx |= CPUID_EXT_OSXSAVE;
4009 }
4010 *edx = env->features[FEAT_1_EDX];
4011 if (cs->nr_cores * cs->nr_threads > 1) {
4012 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
4013 *edx |= CPUID_HT;
4014 }
4015 break;
4016 case 2:
4017 /* cache info: needed for Pentium Pro compatibility */
4018 if (cpu->cache_info_passthrough) {
4019 host_cpuid(index, 0, eax, ebx, ecx, edx);
4020 break;
4021 }
4022 *eax = 1; /* Number of CPUID[EAX=2] calls required */
4023 *ebx = 0;
4024 if (!cpu->enable_l3_cache) {
4025 *ecx = 0;
4026 } else {
4027 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
4028 }
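        /*
         * EDX packs the legacy one-byte cache descriptors: L1D in bits
         * 23..16, L1I in bits 15..8 and L2 in bits 7..0; ECX carries L3.
         */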
4029 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
4030 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
4031 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
4032 break;
4033 case 4:
4034 /* cache info: needed for Core compatibility */
4035 if (cpu->cache_info_passthrough) {
4036 host_cpuid(index, count, eax, ebx, ecx, edx);
4037 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
4038 *eax &= ~0xFC000000;
4039 if ((*eax & 31) && cs->nr_cores > 1) {
4040 *eax |= (cs->nr_cores - 1) << 26;
4041 }
4042 } else {
4043 *eax = 0;
4044 switch (count) {
4045 case 0: /* L1 dcache info */
4046 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
4047 1, cs->nr_cores,
4048 eax, ebx, ecx, edx);
4049 break;
4050 case 1: /* L1 icache info */
4051 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
4052 1, cs->nr_cores,
4053 eax, ebx, ecx, edx);
4054 break;
4055 case 2: /* L2 cache info */
4056 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
4057 cs->nr_threads, cs->nr_cores,
4058 eax, ebx, ecx, edx);
4059 break;
4060 case 3: /* L3 cache info */
4061 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
4062 if (cpu->enable_l3_cache) {
4063 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
4064 (1 << pkg_offset), cs->nr_cores,
4065 eax, ebx, ecx, edx);
4066 break;
4067 }
4068 /* fall through */
4069 default: /* end of info */
4070 *eax = *ebx = *ecx = *edx = 0;
4071 break;
4072 }
4073 }
4074 break;
4075 case 5:
4076 /* MONITOR/MWAIT Leaf */
4077 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
4078 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
4079 *ecx = cpu->mwait.ecx; /* flags */
4080 *edx = cpu->mwait.edx; /* mwait substates */
4081 break;
4082 case 6:
4083 /* Thermal and Power Leaf */
4084 *eax = env->features[FEAT_6_EAX];
4085 *ebx = 0;
4086 *ecx = 0;
4087 *edx = 0;
4088 break;
4089 case 7:
4090 /* Structured Extended Feature Flags Enumeration Leaf */
4091 if (count == 0) {
4092 *eax = 0; /* Maximum ECX value for sub-leaves */
4093 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
4094 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
4095 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
4096 *ecx |= CPUID_7_0_ECX_OSPKE;
4097 }
4098 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
4099 } else {
4100 *eax = 0;
4101 *ebx = 0;
4102 *ecx = 0;
4103 *edx = 0;
4104 }
4105 break;
4106 case 9:
4107 /* Direct Cache Access Information Leaf */
4108 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
4109 *ebx = 0;
4110 *ecx = 0;
4111 *edx = 0;
4112 break;
4113 case 0xA:
4114 /* Architectural Performance Monitoring Leaf */
4115 if (kvm_enabled() && cpu->enable_pmu) {
4116 KVMState *s = cs->kvm_state;
4117
4118 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
4119 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
4120 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
4121 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
4122 } else if (hvf_enabled() && cpu->enable_pmu) {
4123 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
4124 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
4125 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
4126 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
4127 } else {
4128 *eax = 0;
4129 *ebx = 0;
4130 *ecx = 0;
4131 *edx = 0;
4132 }
4133 break;
4134 case 0xB:
4135 /* Extended Topology Enumeration Leaf */
4136 if (!cpu->enable_cpuid_0xb) {
4137 *eax = *ebx = *ecx = *edx = 0;
4138 break;
4139 }
4140
4141 *ecx = count & 0xff;
4142 *edx = cpu->apic_id;
4143
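        /*
         * Each sub-leaf describes one topology level: EAX[4:0] is how many
         * APIC ID bits to shift right to reach the next level, EBX is the
         * number of logical processors at this level, and ECX[15:8] is the
         * level type.
         */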
4144 switch (count) {
4145 case 0:
4146 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
4147 *ebx = cs->nr_threads;
4148 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
4149 break;
4150 case 1:
4151 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
4152 *ebx = cs->nr_cores * cs->nr_threads;
4153 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
4154 break;
4155 default:
4156 *eax = 0;
4157 *ebx = 0;
4158 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
4159 }
4160
4161 assert(!(*eax & ~0x1f));
4162 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
4163 break;
4164 case 0xD: {
4165 /* Processor Extended State */
4166 *eax = 0;
4167 *ebx = 0;
4168 *ecx = 0;
4169 *edx = 0;
4170 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4171 break;
4172 }
4173
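        /*
         * Sub-leaf 0 enumerates the supported XSAVE components (EDX:EAX) and
         * save-area size, sub-leaf 1 the XSAVE extension feature bits, and
         * sub-leaves >= 2 the size and offset of each individual component.
         */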
4174 if (count == 0) {
4175 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
4176 *eax = env->features[FEAT_XSAVE_COMP_LO];
4177 *edx = env->features[FEAT_XSAVE_COMP_HI];
4178 *ebx = *ecx;
4179 } else if (count == 1) {
4180 *eax = env->features[FEAT_XSAVE];
4181 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
4182 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
4183 const ExtSaveArea *esa = &x86_ext_save_areas[count];
4184 *eax = esa->size;
4185 *ebx = esa->offset;
4186 }
4187 }
4188 break;
4189 }
4190 case 0x14: {
4191 /* Intel Processor Trace Enumeration */
4192 *eax = 0;
4193 *ebx = 0;
4194 *ecx = 0;
4195 *edx = 0;
4196 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
4197 !kvm_enabled()) {
4198 break;
4199 }
4200
4201 if (count == 0) {
4202 *eax = INTEL_PT_MAX_SUBLEAF;
4203 *ebx = INTEL_PT_MINIMAL_EBX;
4204 *ecx = INTEL_PT_MINIMAL_ECX;
4205 } else if (count == 1) {
4206 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
4207 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
4208 }
4209 break;
4210 }
4211 case 0x40000000:
4212 /*
4213 * CPUID code in kvm_arch_init_vcpu() ignores stuff
4214         * set here, but we restrict this leaf to TCG nonetheless.
4215 */
4216 if (tcg_enabled() && cpu->expose_tcg) {
4217 memcpy(signature, "TCGTCGTCGTCG", 12);
4218 *eax = 0x40000001;
4219 *ebx = signature[0];
4220 *ecx = signature[1];
4221 *edx = signature[2];
4222 } else {
4223 *eax = 0;
4224 *ebx = 0;
4225 *ecx = 0;
4226 *edx = 0;
4227 }
4228 break;
4229 case 0x40000001:
4230 *eax = 0;
4231 *ebx = 0;
4232 *ecx = 0;
4233 *edx = 0;
4234 break;
4235 case 0x80000000:
4236 *eax = env->cpuid_xlevel;
4237 *ebx = env->cpuid_vendor1;
4238 *edx = env->cpuid_vendor2;
4239 *ecx = env->cpuid_vendor3;
4240 break;
4241 case 0x80000001:
4242 *eax = env->cpuid_version;
4243 *ebx = 0;
4244 *ecx = env->features[FEAT_8000_0001_ECX];
4245 *edx = env->features[FEAT_8000_0001_EDX];
4246
4247 /* The Linux kernel checks for the CMPLegacy bit and
4248 * discards multiple thread information if it is set.
4249 * So don't set it here for Intel to make Linux guests happy.
4250 */
4251 if (cs->nr_cores * cs->nr_threads > 1) {
4252 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
4253 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
4254 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
4255 *ecx |= 1 << 1; /* CmpLegacy bit */
4256 }
4257 }
4258 break;
4259 case 0x80000002:
4260 case 0x80000003:
4261 case 0x80000004:
4262 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
4263 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
4264 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
4265 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
4266 break;
4267 case 0x80000005:
4268 /* cache info (L1 cache) */
4269 if (cpu->cache_info_passthrough) {
4270 host_cpuid(index, 0, eax, ebx, ecx, edx);
4271 break;
4272 }
4273 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
4274 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
4275 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
4276 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
4277 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
4278 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
4279 break;
4280 case 0x80000006:
4281 /* cache info (L2 cache) */
4282 if (cpu->cache_info_passthrough) {
4283 host_cpuid(index, 0, eax, ebx, ecx, edx);
4284 break;
4285 }
4286 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
4287 (L2_DTLB_2M_ENTRIES << 16) | \
4288 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
4289 (L2_ITLB_2M_ENTRIES);
4290 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
4291 (L2_DTLB_4K_ENTRIES << 16) | \
4292 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
4293 (L2_ITLB_4K_ENTRIES);
4294 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
4295 cpu->enable_l3_cache ?
4296 env->cache_info_amd.l3_cache : NULL,
4297 ecx, edx);
4298 break;
4299 case 0x80000007:
4300 *eax = 0;
4301 *ebx = 0;
4302 *ecx = 0;
4303 *edx = env->features[FEAT_8000_0007_EDX];
4304 break;
4305 case 0x80000008:
4306 /* virtual & phys address size in low 2 bytes. */
4307 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4308 /* 64 bit processor */
4309 *eax = cpu->phys_bits; /* configurable physical bits */
4310 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
4311 *eax |= 0x00003900; /* 57 bits virtual */
4312 } else {
4313 *eax |= 0x00003000; /* 48 bits virtual */
4314 }
4315 } else {
4316 *eax = cpu->phys_bits;
4317 }
4318 *ebx = env->features[FEAT_8000_0008_EBX];
4319 *ecx = 0;
4320 *edx = 0;
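        /*
         * ECX[7:0] is AMD's core-count (NC) field, reported here as the
         * number of logical processors in the package minus one.
         */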
4321 if (cs->nr_cores * cs->nr_threads > 1) {
4322 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
4323 }
4324 break;
4325 case 0x8000000A:
4326 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4327 *eax = 0x00000001; /* SVM Revision */
4328 *ebx = 0x00000010; /* nr of ASIDs */
4329 *ecx = 0;
4330 *edx = env->features[FEAT_SVM]; /* optional features */
4331 } else {
4332 *eax = 0;
4333 *ebx = 0;
4334 *ecx = 0;
4335 *edx = 0;
4336 }
4337 break;
4338 case 0x8000001D:
4339 *eax = 0;
4340 switch (count) {
4341 case 0: /* L1 dcache info */
4342 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
4343 eax, ebx, ecx, edx);
4344 break;
4345 case 1: /* L1 icache info */
4346 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
4347 eax, ebx, ecx, edx);
4348 break;
4349 case 2: /* L2 cache info */
4350 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
4351 eax, ebx, ecx, edx);
4352 break;
4353 case 3: /* L3 cache info */
4354 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
4355 eax, ebx, ecx, edx);
4356 break;
4357 default: /* end of info */
4358 *eax = *ebx = *ecx = *edx = 0;
4359 break;
4360 }
4361 break;
4362 case 0x8000001E:
4363 assert(cpu->core_id <= 255);
4364 encode_topo_cpuid8000001e(cs, cpu,
4365 eax, ebx, ecx, edx);
4366 break;
4367 case 0xC0000000:
4368 *eax = env->cpuid_xlevel2;
4369 *ebx = 0;
4370 *ecx = 0;
4371 *edx = 0;
4372 break;
4373 case 0xC0000001:
4374 /* Support for VIA CPU's CPUID instruction */
4375 *eax = env->cpuid_version;
4376 *ebx = 0;
4377 *ecx = 0;
4378 *edx = env->features[FEAT_C000_0001_EDX];
4379 break;
4380 case 0xC0000002:
4381 case 0xC0000003:
4382 case 0xC0000004:
4383        /* Reserved for future use; currently filled with zero */
4384 *eax = 0;
4385 *ebx = 0;
4386 *ecx = 0;
4387 *edx = 0;
4388 break;
4389 case 0x8000001F:
4390 *eax = sev_enabled() ? 0x2 : 0;
4391 *ebx = sev_get_cbit_position();
4392 *ebx |= sev_get_reduced_phys_bits() << 6;
4393 *ecx = 0;
4394 *edx = 0;
4395 break;
4396 default:
4397 /* reserved values: zero */
4398 *eax = 0;
4399 *ebx = 0;
4400 *ecx = 0;
4401 *edx = 0;
4402 break;
4403 }
4404}
4405
4406/* CPUClass::reset() */
4407static void x86_cpu_reset(CPUState *s)
4408{
4409 X86CPU *cpu = X86_CPU(s);
4410 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
4411 CPUX86State *env = &cpu->env;
4412 target_ulong cr4;
4413 uint64_t xcr0;
4414 int i;
4415
4416 xcc->parent_reset(s);
4417
4418 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
4419
4420 env->old_exception = -1;
4421
4422 /* init to reset state */
4423
4424 env->hflags2 |= HF2_GIF_MASK;
4425
4426 cpu_x86_update_cr0(env, 0x60000010);
4427 env->a20_mask = ~0x0;
4428 env->smbase = 0x30000;
4429 env->msr_smi_count = 0;
4430
4431 env->idt.limit = 0xffff;
4432 env->gdt.limit = 0xffff;
4433 env->ldt.limit = 0xffff;
4434 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
4435 env->tr.limit = 0xffff;
4436 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
4437
4438 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
4439 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
4440 DESC_R_MASK | DESC_A_MASK);
4441 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
4442 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4443 DESC_A_MASK);
4444 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
4445 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4446 DESC_A_MASK);
4447 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
4448 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4449 DESC_A_MASK);
4450 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
4451 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4452 DESC_A_MASK);
4453 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
4454 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4455 DESC_A_MASK);
4456
4457 env->eip = 0xfff0;
4458 env->regs[R_EDX] = env->cpuid_version;
4459
4460 env->eflags = 0x2;
4461
4462 /* FPU init */
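    /* fptags[i] == 1 marks the corresponding x87 register as empty */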
4463 for (i = 0; i < 8; i++) {
4464 env->fptags[i] = 1;
4465 }
4466 cpu_set_fpuc(env, 0x37f);
4467
4468 env->mxcsr = 0x1f80;
4469 /* All units are in INIT state. */
4470 env->xstate_bv = 0;
4471
4472 env->pat = 0x0007040600070406ULL;
4473 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
4474
4475 memset(env->dr, 0, sizeof(env->dr));
4476 env->dr[6] = DR6_FIXED_1;
4477 env->dr[7] = DR7_FIXED_1;
4478 cpu_breakpoint_remove_all(s, BP_CPU);
4479 cpu_watchpoint_remove_all(s, BP_CPU);
4480
4481 cr4 = 0;
4482 xcr0 = XSTATE_FP_MASK;
4483
4484#ifdef CONFIG_USER_ONLY
4485 /* Enable all the features for user-mode. */
4486 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4487 xcr0 |= XSTATE_SSE_MASK;
4488 }
4489 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4490 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4491 if (env->features[esa->feature] & esa->bits) {
4492 xcr0 |= 1ull << i;
4493 }
4494 }
4495
4496 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
4497 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
4498 }
4499 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
4500 cr4 |= CR4_FSGSBASE_MASK;
4501 }
4502#endif
4503
4504 env->xcr0 = xcr0;
4505 cpu_x86_update_cr4(env, cr4);
4506
4507 /*
4508 * SDM 11.11.5 requires:
4509 * - IA32_MTRR_DEF_TYPE MSR.E = 0
4510 * - IA32_MTRR_PHYSMASKn.V = 0
4511 * All other bits are undefined. For simplification, zero it all.
4512 */
4513 env->mtrr_deftype = 0;
4514 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
4515 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
4516
4517 env->interrupt_injected = -1;
4518 env->exception_injected = -1;
4519 env->nmi_injected = false;
4520#if !defined(CONFIG_USER_ONLY)
4521 /* We hard-wire the BSP to the first CPU. */
4522 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
4523
4524 s->halted = !cpu_is_bsp(cpu);
4525
4526 if (kvm_enabled()) {
4527 kvm_arch_reset_vcpu(cpu);
4528 }
4529 else if (hvf_enabled()) {
4530 hvf_reset_vcpu(s);
4531 }
4532#endif
4533}
4534
4535#ifndef CONFIG_USER_ONLY
4536bool cpu_is_bsp(X86CPU *cpu)
4537{
4538 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4539}
4540
4541/* TODO: remove me, when reset over QOM tree is implemented */
4542static void x86_cpu_machine_reset_cb(void *opaque)
4543{
4544 X86CPU *cpu = opaque;
4545 cpu_reset(CPU(cpu));
4546}
4547#endif
4548
4549static void mce_init(X86CPU *cpu)
4550{
4551 CPUX86State *cenv = &cpu->env;
4552 unsigned int bank;
4553
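    /*
     * MCE/MCA state is only initialized for family >= 6 CPUs that have both
     * the MCE and MCA CPUID bits set; all banks are then enabled with
     * default capabilities.
     */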
4554 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4555 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4556 (CPUID_MCE | CPUID_MCA)) {
4557 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4558 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4559 cenv->mcg_ctl = ~(uint64_t)0;
4560 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4561 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4562 }
4563 }
4564}
4565
4566#ifndef CONFIG_USER_ONLY
4567APICCommonClass *apic_get_class(void)
4568{
4569 const char *apic_type = "apic";
4570
4571 /* TODO: in-kernel irqchip for hvf */
4572 if (kvm_apic_in_kernel()) {
4573 apic_type = "kvm-apic";
4574 } else if (xen_enabled()) {
4575 apic_type = "xen-apic";
4576 }
4577
4578 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4579}
4580
4581static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
4582{
4583 APICCommonState *apic;
4584 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
4585
4586 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
4587
4588 object_property_add_child(OBJECT(cpu), "lapic",
4589 OBJECT(cpu->apic_state), &error_abort);
4590 object_unref(OBJECT(cpu->apic_state));
4591
4592 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
4593 /* TODO: convert to link<> */
4594 apic = APIC_COMMON(cpu->apic_state);
4595 apic->cpu = cpu;
4596 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
4597}
4598
4599static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4600{
4601 APICCommonState *apic;
4602 static bool apic_mmio_map_once;
4603
4604 if (cpu->apic_state == NULL) {
4605 return;
4606 }
4607 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
4608 errp);
4609
4610 /* Map APIC MMIO area */
4611 apic = APIC_COMMON(cpu->apic_state);
4612 if (!apic_mmio_map_once) {
4613 memory_region_add_subregion_overlap(get_system_memory(),
4614 apic->apicbase &
4615 MSR_IA32_APICBASE_BASE,
4616 &apic->io_memory,
4617 0x1000);
4618 apic_mmio_map_once = true;
4619 }
4620}
4621
4622static void x86_cpu_machine_done(Notifier *n, void *unused)
4623{
4624 X86CPU *cpu = container_of(n, X86CPU, machine_done);
4625 MemoryRegion *smram =
4626 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4627
4628 if (smram) {
4629 cpu->smram = g_new(MemoryRegion, 1);
4630 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4631 smram, 0, 1ull << 32);
4632 memory_region_set_enabled(cpu->smram, true);
4633 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
4634 }
4635}
4636#else
4637static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4638{
4639}
4640#endif
4641
4642/* Note: Only safe for use on x86(-64) hosts */
4643static uint32_t x86_host_phys_bits(void)
4644{
4645 uint32_t eax;
4646 uint32_t host_phys_bits;
4647
4648 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4649 if (eax >= 0x80000008) {
4650 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4651 /* Note: According to AMD doc 25481 rev 2.34 they have a field
4652         * at 23:16 that can specify the maximum physical address bits
4653         * for the guest, overriding this value; but I've not seen
4654 * anything with that set.
4655 */
4656 host_phys_bits = eax & 0xff;
4657 } else {
4658        /* It's an odd 64-bit machine that doesn't have the leaf for
4659         * physical address bits; fall back to 36, which is what most
4660         * older Intel CPUs report.
4661 */
4662 host_phys_bits = 36;
4663 }
4664
4665 return host_phys_bits;
4666}
4667
4668static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4669{
4670 if (*min < value) {
4671 *min = value;
4672 }
4673}
4674
4675/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4676static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4677{
4678 CPUX86State *env = &cpu->env;
4679 FeatureWordInfo *fi = &feature_word_info[w];
4680 uint32_t eax = fi->cpuid_eax;
4681 uint32_t region = eax & 0xF0000000;
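    /*
     * The top nibble of the leaf number selects which limit to bump:
     * 0x0xxxxxxx -> level, 0x8xxxxxxx -> xlevel, 0xCxxxxxxx -> xlevel2.
     */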
4682
4683 if (!env->features[w]) {
4684 return;
4685 }
4686
4687 switch (region) {
4688 case 0x00000000:
4689 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4690 break;
4691 case 0x80000000:
4692 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4693 break;
4694 case 0xC0000000:
4695 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4696 break;
4697 }
4698}
4699
4700/* Calculate XSAVE components based on the configured CPU feature flags */
4701static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4702{
4703 CPUX86State *env = &cpu->env;
4704 int i;
4705 uint64_t mask;
4706
4707 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4708 return;
4709 }
4710
4711 mask = 0;
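    /*
     * Build an XCR0-style component mask from the enabled feature bits; it
     * is exposed to the guest through CPUID[0xD, 0].EDX:EAX.
     */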
4712 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4713 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4714 if (env->features[esa->feature] & esa->bits) {
4715 mask |= (1ULL << i);
4716 }
4717 }
4718
4719 env->features[FEAT_XSAVE_COMP_LO] = mask;
4720 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4721}
4722
4723/***** Steps involved in loading and filtering CPUID data
4724 *
4725 * When initializing and realizing a CPU object, the steps
4726 * involved in setting up CPUID data are:
4727 *
4728 * 1) Loading CPU model definition (X86CPUDefinition). This is
4729 * implemented by x86_cpu_load_def() and should be completely
4730 * transparent, as it is done automatically by instance_init.
4731 * No code should need to look at X86CPUDefinition structs
4732 * outside instance_init.
4733 *
4734 * 2) CPU expansion. This is done by realize before CPUID
4735 * filtering, and will make sure host/accelerator data is
4736 * loaded for CPU models that depend on host capabilities
4737 * (e.g. "host"). Done by x86_cpu_expand_features().
4738 *
4739 * 3) CPUID filtering. This initializes extra data related to
4740 * CPUID, and checks if the host supports all capabilities
4741 * required by the CPU. Runnability of a CPU model is
4742 * determined at this step. Done by x86_cpu_filter_features().
4743 *
4744 * Some operations don't require all steps to be performed.
4745 * More precisely:
4746 *
4747 * - CPU instance creation (instance_init) will run only CPU
4748 * model loading. CPU expansion can't run at instance_init-time
4749 * because host/accelerator data may not be available yet.
4750 * - CPU realization will perform both CPU model expansion and CPUID
4751 * filtering, and return an error in case one of them fails.
4752 * - query-cpu-definitions needs to run all 3 steps. It needs
4753 * to run CPUID filtering, as the 'unavailable-features'
4754 * field is set based on the filtering results.
4755 * - The query-cpu-model-expansion QMP command only needs to run
4756 * CPU model loading and CPU expansion. It should not filter
4757 * any CPUID data based on host capabilities.
4758 */
4759
4760/* Expand CPU configuration data, based on configured features
4761 * and host/accelerator capabilities when appropriate.
4762 */
4763static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4764{
4765 CPUX86State *env = &cpu->env;
4766 FeatureWord w;
4767 GList *l;
4768 Error *local_err = NULL;
4769
4770    /* TODO: Now cpu->max_features doesn't overwrite features
4771 * set using QOM properties, and we can convert
4772 * plus_features & minus_features to global properties
4773 * inside x86_cpu_parse_featurestr() too.
4774 */
4775 if (cpu->max_features) {
4776 for (w = 0; w < FEATURE_WORDS; w++) {
4777 /* Override only features that weren't set explicitly
4778 * by the user.
4779 */
4780 env->features[w] |=
4781 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4782 ~env->user_features[w] & \
4783 ~feature_word_info[w].no_autoenable_flags;
4784 }
4785 }
4786
4787 for (l = plus_features; l; l = l->next) {
4788 const char *prop = l->data;
4789 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4790 if (local_err) {
4791 goto out;
4792 }
4793 }
4794
4795 for (l = minus_features; l; l = l->next) {
4796 const char *prop = l->data;
4797 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4798 if (local_err) {
4799 goto out;
4800 }
4801 }
4802
4803 if (!kvm_enabled() || !cpu->expose_kvm) {
4804 env->features[FEAT_KVM] = 0;
4805 }
4806
4807 x86_cpu_enable_xsave_components(cpu);
4808
4809    /* CPUID[EAX=7,ECX=0].EBX always increases level automatically: */
4810 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4811 if (cpu->full_cpuid_auto_level) {
4812 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4813 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4814 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4815 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4816 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4817 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4818 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4819 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4820 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4821 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4822 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4823 /* SVM requires CPUID[0x8000000A] */
4824 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4825 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4826 }
4827
4828 /* SEV requires CPUID[0x8000001F] */
4829 if (sev_enabled()) {
4830 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4831 }
4832 }
4833
4834 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4835 if (env->cpuid_level == UINT32_MAX) {
4836 env->cpuid_level = env->cpuid_min_level;
4837 }
4838 if (env->cpuid_xlevel == UINT32_MAX) {
4839 env->cpuid_xlevel = env->cpuid_min_xlevel;
4840 }
4841 if (env->cpuid_xlevel2 == UINT32_MAX) {
4842 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4843 }
4844
4845out:
4846 if (local_err != NULL) {
4847 error_propagate(errp, local_err);
4848 }
4849}
4850
4851/*
4852 * Finishes initialization of CPUID data, filters CPU feature
4853 * words based on host availability of each feature.
4854 *
4855 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4856 */
4857static int x86_cpu_filter_features(X86CPU *cpu)
4858{
4859 CPUX86State *env = &cpu->env;
4860 FeatureWord w;
4861 int rv = 0;
4862
4863 for (w = 0; w < FEATURE_WORDS; w++) {
4864 uint32_t host_feat =
4865 x86_cpu_get_supported_feature_word(w, false);
4866 uint32_t requested_features = env->features[w];
4867 env->features[w] &= host_feat;
4868 cpu->filtered_features[w] = requested_features & ~env->features[w];
4869 if (cpu->filtered_features[w]) {
4870 rv = 1;
4871 }
4872 }
4873
4874 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
4875 kvm_enabled()) {
4876 KVMState *s = CPU(cpu)->kvm_state;
4877 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
4878 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
4879 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
4880 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
4881 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
4882
4883 if (!eax_0 ||
4884 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
4885 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
4886 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
4887 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
4888 INTEL_PT_ADDR_RANGES_NUM) ||
4889 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
4890 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
4891 (ecx_0 & INTEL_PT_IP_LIP)) {
4892 /*
4893 * Processor Trace capabilities aren't configurable, so if the
4894 * host can't emulate the capabilities we report on
4895 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
4896 */
4897 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
4898 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
4899 rv = 1;
4900 }
4901 }
4902
4903 return rv;
4904}
4905
4906#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4907 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4908 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4909#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4910 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4911 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
4912static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4913{
4914 CPUState *cs = CPU(dev);
4915 X86CPU *cpu = X86_CPU(dev);
4916 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4917 CPUX86State *env = &cpu->env;
4918 Error *local_err = NULL;
4919 static bool ht_warned;
4920
4921 if (xcc->host_cpuid_required) {
4922 if (!accel_uses_host_cpuid()) {
4923 char *name = x86_cpu_class_get_model_name(xcc);
4924 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4925 g_free(name);
4926 goto out;
4927 }
4928
4929 if (enable_cpu_pm) {
4930 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
4931 &cpu->mwait.ecx, &cpu->mwait.edx);
4932 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
4933 }
4934 }
4935
4936 /* mwait extended info: needed for Core compatibility */
4937 /* We always wake on interrupt even if host does not have the capability */
4938 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
4939
4940 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4941 error_setg(errp, "apic-id property was not initialized properly");
4942 return;
4943 }
4944
4945 x86_cpu_expand_features(cpu, &local_err);
4946 if (local_err) {
4947 goto out;
4948 }
4949
4950 if (x86_cpu_filter_features(cpu) &&
4951 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4952 x86_cpu_report_filtered_features(cpu);
4953 if (cpu->enforce_cpuid) {
4954 error_setg(&local_err,
4955 accel_uses_host_cpuid() ?
4956 "Host doesn't support requested features" :
4957 "TCG doesn't support requested features");
4958 goto out;
4959 }
4960 }
4961
4962 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4963 * CPUID[1].EDX.
4964 */
4965 if (IS_AMD_CPU(env)) {
4966 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4967 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4968 & CPUID_EXT2_AMD_ALIASES);
4969 }
4970
4971    /* For 64-bit systems, think about the number of physical bits to present.
4972     * Ideally this should be the same as the host; anything other than matching
4973     * the host can cause incorrect guest behaviour.
4974     * QEMU used to pick the magic value of 40 bits, which corresponds to
4975     * consumer AMD devices but nothing else.
4976 */
4977 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4978 if (accel_uses_host_cpuid()) {
4979 uint32_t host_phys_bits = x86_host_phys_bits();
4980 static bool warned;
4981
4982 if (cpu->host_phys_bits) {
4983 /* The user asked for us to use the host physical bits */
4984 cpu->phys_bits = host_phys_bits;
4985 }
4986
4987 /* Print a warning if the user set it to a value that's not the
4988 * host value.
4989 */
4990 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4991 !warned) {
4992 warn_report("Host physical bits (%u)"
4993 " does not match phys-bits property (%u)",
4994 host_phys_bits, cpu->phys_bits);
4995 warned = true;
4996 }
4997
4998 if (cpu->phys_bits &&
4999 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
5000 cpu->phys_bits < 32)) {
5001 error_setg(errp, "phys-bits should be between 32 and %u "
5002 " (but is %u)",
5003 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
5004 return;
5005 }
5006 } else {
5007 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
5008 error_setg(errp, "TCG only supports phys-bits=%u",
5009 TCG_PHYS_ADDR_BITS);
5010 return;
5011 }
5012 }
5013 /* 0 means it was not explicitly set by the user (or by machine
5014 * compat_props or by the host code above). In this case, the default
5015 * is the value used by TCG (40).
5016 */
5017 if (cpu->phys_bits == 0) {
5018 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
5019 }
5020 } else {
5021        /* For 32-bit systems, don't use the user-set value, but keep
5022 * phys_bits consistent with what we tell the guest.
5023 */
5024 if (cpu->phys_bits != 0) {
5025 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
5026 return;
5027 }
5028
5029 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
5030 cpu->phys_bits = 36;
5031 } else {
5032 cpu->phys_bits = 32;
5033 }
5034 }
5035
5036 /* Cache information initialization */
5037 if (!cpu->legacy_cache) {
5038 if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
5039 char *name = x86_cpu_class_get_model_name(xcc);
5040 error_setg(errp,
5041 "CPU model '%s' doesn't support legacy-cache=off", name);
5042 g_free(name);
5043 return;
5044 }
5045 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
5046 *xcc->cpu_def->cache_info;
5047 } else {
5048 /* Build legacy cache information */
5049 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
5050 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
5051 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
5052 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
5053
5054 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
5055 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
5056 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
5057 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
5058
5059 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
5060 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
5061 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
5062 env->cache_info_amd.l3_cache = &legacy_l3_cache;
5063 }
5064
5065
5066 cpu_exec_realizefn(cs, &local_err);
5067 if (local_err != NULL) {
5068 error_propagate(errp, local_err);
5069 return;
5070 }
5071
5072#ifndef CONFIG_USER_ONLY
5073 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
5074
5075 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
5076 x86_cpu_apic_create(cpu, &local_err);
5077 if (local_err != NULL) {
5078 goto out;
5079 }
5080 }
5081#endif
5082
5083 mce_init(cpu);
5084
5085#ifndef CONFIG_USER_ONLY
5086 if (tcg_enabled()) {
5087 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
5088 cpu->cpu_as_root = g_new(MemoryRegion, 1);
5089
5090 /* Outer container... */
5091 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
5092 memory_region_set_enabled(cpu->cpu_as_root, true);
5093
5094 /* ... with two regions inside: normal system memory with low
5095 * priority, and...
5096 */
5097 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
5098 get_system_memory(), 0, ~0ull);
5099 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
5100 memory_region_set_enabled(cpu->cpu_as_mem, true);
5101
5102 cs->num_ases = 2;
5103 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
5104 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
5105
5106 /* ... SMRAM with higher priority, linked from /machine/smram. */
5107 cpu->machine_done.notify = x86_cpu_machine_done;
5108 qemu_add_machine_init_done_notifier(&cpu->machine_done);
5109 }
5110#endif
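/*
 * Summary of the TCG-only block above: address space 0 ("cpu-memory") is
 * the ordinary system memory view, while address space 1 ("cpu-smm") is
 * rooted at cpu_as_root so that x86_cpu_machine_done() can later overlay
 * the SMRAM region linked from /machine/smram with higher priority; the
 * CPU selects that view while executing in SMM.
 */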
5111
5112 qemu_init_vcpu(cs);
5113
5114 /*
5115 * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
5116 * already adjusts CPUID_0000_0001_EBX and CPUID_8000_0008_ECX based on the
5117 * -smp inputs (sockets, cores, threads), it is still better to warn users
5118 * when the requested topology exposes threads the CPU family doesn't support.
5119 *
5120 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
5121 * cs->nr_threads hasn't been populated yet and the check would be incorrect.
5122 */
5123 if (IS_AMD_CPU(env) &&
5124 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
5125 cs->nr_threads > 1 && !ht_warned) {
5126 error_report("This family of AMD CPU doesn't support "
5127 "hyperthreading(%d). Please configure -smp "
5128 "options properly or try enabling topoext feature.",
5129 cs->nr_threads);
5130 ht_warned = true;
5131 }
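/*
 * For example (hypothetical command lines): "-smp 8,sockets=1,cores=8,threads=1"
 * avoids the warning by not exposing threads at all, while
 * "-cpu EPYC,+topoext" suppresses it by advertising the TOPOEXT extension
 * that makes threads > 1 well-defined for AMD guests.
 */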
5132
5133 x86_cpu_apic_realize(cpu, &local_err);
5134 if (local_err != NULL) {
5135 goto out;
5136 }
5137 cpu_reset(cs);
5138
5139 xcc->parent_realize(dev, &local_err);
5140
5141out:
5142 if (local_err != NULL) {
5143 error_propagate(errp, local_err);
5144 return;
5145 }
5146}
5147
5148static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
5149{
5150 X86CPU *cpu = X86_CPU(dev);
5151 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5152 Error *local_err = NULL;
5153
5154#ifndef CONFIG_USER_ONLY
5155 cpu_remove_sync(CPU(dev));
5156 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
5157#endif
5158
5159 if (cpu->apic_state) {
5160 object_unparent(OBJECT(cpu->apic_state));
5161 cpu->apic_state = NULL;
5162 }
5163
5164 xcc->parent_unrealize(dev, &local_err);
5165 if (local_err != NULL) {
5166 error_propagate(errp, local_err);
5167 return;
5168 }
5169}
5170
5171typedef struct BitProperty {
5172 FeatureWord w;
5173 uint32_t mask;
5174} BitProperty;
5175
5176static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5177 void *opaque, Error **errp)
5178{
5179 X86CPU *cpu = X86_CPU(obj);
5180 BitProperty *fp = opaque;
5181 uint32_t f = cpu->env.features[fp->w];
5182 bool value = (f & fp->mask) == fp->mask;
5183 visit_type_bool(v, name, &value, errp);
5184}
5185
5186static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5187 void *opaque, Error **errp)
5188{
5189 DeviceState *dev = DEVICE(obj);
5190 X86CPU *cpu = X86_CPU(obj);
5191 BitProperty *fp = opaque;
5192 Error *local_err = NULL;
5193 bool value;
5194
5195 if (dev->realized) {
5196 qdev_prop_set_after_realize(dev, name, errp);
5197 return;
5198 }
5199
5200 visit_type_bool(v, name, &value, &local_err);
5201 if (local_err) {
5202 error_propagate(errp, local_err);
5203 return;
5204 }
5205
5206 if (value) {
5207 cpu->env.features[fp->w] |= fp->mask;
5208 } else {
5209 cpu->env.features[fp->w] &= ~fp->mask;
5210 }
5211 cpu->env.user_features[fp->w] |= fp->mask;
5212}
5213
5214static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5215 void *opaque)
5216{
5217 BitProperty *prop = opaque;
5218 g_free(prop);
5219}
5220
5221/* Register a boolean property to get/set a single bit in a uint32_t field.
5222 *
5223 * The same property name can be registered multiple times to make it affect
5224 * multiple bits in the same FeatureWord. In that case, the getter will return
5225 * true only if all bits are set.
5226 */
5227static void x86_cpu_register_bit_prop(X86CPU *cpu,
5228 const char *prop_name,
5229 FeatureWord w,
5230 int bitnr)
5231{
5232 BitProperty *fp;
5233 ObjectProperty *op;
5234 uint32_t mask = (1UL << bitnr);
5235
5236 op = object_property_find(OBJECT(cpu), prop_name, NULL);
5237 if (op) {
5238 fp = op->opaque;
5239 assert(fp->w == w);
5240 fp->mask |= mask;
5241 } else {
5242 fp = g_new0(BitProperty, 1);
5243 fp->w = w;
5244 fp->mask = mask;
5245 object_property_add(OBJECT(cpu), prop_name, "bool",
5246 x86_cpu_get_bit_prop,
5247 x86_cpu_set_bit_prop,
5248 x86_cpu_release_bit_prop, fp, &error_abort);
5249 }
5250}
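/*
 * As a concrete example of the mechanism above: the FEAT_7_0_EBX AVX2 bit
 * is registered as a bool property named "avx2", so a hypothetical
 * "-cpu qemu64,avx2=on" (or the legacy "+avx2" spelling) goes through
 * x86_cpu_set_bit_prop() before realize and updates both env->features
 * and env->user_features.
 */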
5251
5252static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5253 FeatureWord w,
5254 int bitnr)
5255{
5256 FeatureWordInfo *fi = &feature_word_info[w];
5257 const char *name = fi->feat_names[bitnr];
5258
5259 if (!name) {
5260 return;
5261 }
5262
5263 /* Property names should use "-" instead of "_".
5264 * Old names containing underscores are registered as aliases
5265 * using object_property_add_alias()
5266 */
5267 assert(!strchr(name, '_'));
5268 /* aliases don't use "|" delimiters anymore; they are registered
5269 * manually using object_property_add_alias() */
5270 assert(!strchr(name, '|'));
5271 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5272}
5273
5274static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5275{
5276 X86CPU *cpu = X86_CPU(cs);
5277 CPUX86State *env = &cpu->env;
5278 GuestPanicInformation *panic_info = NULL;
5279
5280 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5281 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5282
5283 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
5284
5285 assert(HV_CRASH_PARAMS >= 5);
5286 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5287 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5288 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5289 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5290 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5291 }
5292
5293 return panic_info;
5294}
5295static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5296 const char *name, void *opaque,
5297 Error **errp)
5298{
5299 CPUState *cs = CPU(obj);
5300 GuestPanicInformation *panic_info;
5301
5302 if (!cs->crash_occurred) {
5303 error_setg(errp, "No crash occured");
5304 return;
5305 }
5306
5307 panic_info = x86_cpu_get_crash_info(cs);
5308 if (panic_info == NULL) {
5309 error_setg(errp, "No crash information");
5310 return;
5311 }
5312
5313 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5314 errp);
5315 qapi_free_GuestPanicInformation(panic_info);
5316}
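/*
 * The "crash-information" property registered in x86_cpu_initfn() below
 * can then be read over QMP once a guest crash has been recorded, e.g.
 * with a qom-get command along the lines of (the QOM path is
 * hypothetical and depends on how the CPU was created):
 *   { "execute": "qom-get",
 *     "arguments": { "path": "/machine/unattached/device[0]",
 *                    "property": "crash-information" } }
 */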
5317
5318static void x86_cpu_initfn(Object *obj)
5319{
5320 CPUState *cs = CPU(obj);
5321 X86CPU *cpu = X86_CPU(obj);
5322 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
5323 CPUX86State *env = &cpu->env;
5324 FeatureWord w;
5325
5326 cs->env_ptr = env;
5327
5328 object_property_add(obj, "family", "int",
5329 x86_cpuid_version_get_family,
5330 x86_cpuid_version_set_family, NULL, NULL, NULL);
5331 object_property_add(obj, "model", "int",
5332 x86_cpuid_version_get_model,
5333 x86_cpuid_version_set_model, NULL, NULL, NULL);
5334 object_property_add(obj, "stepping", "int",
5335 x86_cpuid_version_get_stepping,
5336 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
5337 object_property_add_str(obj, "vendor",
5338 x86_cpuid_get_vendor,
5339 x86_cpuid_set_vendor, NULL);
5340 object_property_add_str(obj, "model-id",
5341 x86_cpuid_get_model_id,
5342 x86_cpuid_set_model_id, NULL);
5343 object_property_add(obj, "tsc-frequency", "int",
5344 x86_cpuid_get_tsc_freq,
5345 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
5346 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
5347 x86_cpu_get_feature_words,
5348 NULL, NULL, (void *)env->features, NULL);
5349 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
5350 x86_cpu_get_feature_words,
5351 NULL, NULL, (void *)cpu->filtered_features, NULL);
5352
5353 object_property_add(obj, "crash-information", "GuestPanicInformation",
5354 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
5355
5356 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
5357
5358 for (w = 0; w < FEATURE_WORDS; w++) {
5359 int bitnr;
5360
5361 for (bitnr = 0; bitnr < 32; bitnr++) {
5362 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
5363 }
5364 }
5365
5366 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
5367 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
5368 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
5369 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
5370 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
5371 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
5372 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
5373
5374 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
5375 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
5376 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
5377 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
5378 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
5379 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
5380 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
5381 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
5382 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
5383 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
5384 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
5385 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
5386 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
5387 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
5388 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
5389 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
5390 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
5391 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
5392 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
5393 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
5394 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
5395
5396 if (xcc->cpu_def) {
5397 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
5398 }
5399}
5400
5401static int64_t x86_cpu_get_arch_id(CPUState *cs)
5402{
5403 X86CPU *cpu = X86_CPU(cs);
5404
5405 return cpu->apic_id;
5406}
5407
5408static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5409{
5410 X86CPU *cpu = X86_CPU(cs);
5411
5412 return cpu->env.cr[0] & CR0_PG_MASK;
5413}
5414
5415static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5416{
5417 X86CPU *cpu = X86_CPU(cs);
5418
5419 cpu->env.eip = value;
5420}
5421
5422static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5423{
5424 X86CPU *cpu = X86_CPU(cs);
5425
5426 cpu->env.eip = tb->pc - tb->cs_base;
5427}
5428
5429static bool x86_cpu_has_work(CPUState *cs)
5430{
5431 X86CPU *cpu = X86_CPU(cs);
5432 CPUX86State *env = &cpu->env;
5433
5434 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
5435 CPU_INTERRUPT_POLL)) &&
5436 (env->eflags & IF_MASK)) ||
5437 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
5438 CPU_INTERRUPT_INIT |
5439 CPU_INTERRUPT_SIPI |
5440 CPU_INTERRUPT_MCE)) ||
5441 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
5442 !(env->hflags & HF_SMM_MASK));
5443}
5444
5445static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5446{
5447 X86CPU *cpu = X86_CPU(cs);
5448 CPUX86State *env = &cpu->env;
5449
5450 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5451 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5452 : bfd_mach_i386_i8086);
5453 info->print_insn = print_insn_i386;
5454
5455 info->cap_arch = CS_ARCH_X86;
5456 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5457 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5458 : CS_MODE_16);
5459 info->cap_insn_unit = 1;
5460 info->cap_insn_split = 8;
5461}
5462
5463void x86_update_hflags(CPUX86State *env)
5464{
5465 uint32_t hflags;
5466#define HFLAG_COPY_MASK \
5467 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
5468 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
5469 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
5470 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
5471
5472 hflags = env->hflags & HFLAG_COPY_MASK;
5473 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
5474 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
5475 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
5476 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
5477 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
5478
5479 if (env->cr[4] & CR4_OSFXSR_MASK) {
5480 hflags |= HF_OSFXSR_MASK;
5481 }
5482
5483 if (env->efer & MSR_EFER_LMA) {
5484 hflags |= HF_LMA_MASK;
5485 }
5486
5487 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
5488 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
5489 } else {
5490 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
5491 (DESC_B_SHIFT - HF_CS32_SHIFT);
5492 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
5493 (DESC_B_SHIFT - HF_SS32_SHIFT);
5494 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
5495 !(hflags & HF_CS32_MASK)) {
5496 hflags |= HF_ADDSEG_MASK;
5497 } else {
5498 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
5499 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
5500 }
5501 }
5502 env->hflags = hflags;
5503}
5504
5505static Property x86_cpu_properties[] = {
5506#ifdef CONFIG_USER_ONLY
5507 /* apic_id = 0 by default for *-user, see commit 9886e834 */
5508 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
5509 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
5510 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
5511 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
5512#else
5513 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
5514 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
5515 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
5516 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
5517#endif
5518 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
5519 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
5520 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
5521 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
5522 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
5523 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
5524 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
5525 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
5526 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
5527 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
5528 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
5529 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
5530 DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
5531 DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
5532 DEFINE_PROP_BOOL("hv-tlbflush", X86CPU, hyperv_tlbflush, false),
5533 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
5534 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
5535 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
5536 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
5537 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
5538 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
5539 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
5540 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
5541 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
5542 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
5543 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
5544 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
5545 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
5546 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
5547 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
5548 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
5549 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
5550 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
5551 false),
5552 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
5553 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
5554 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
5555 true),
5556 /*
5557 * legacy_cache defaults to true unless the CPU model provides its
5558 * own cache information (see x86_cpu_load_def()).
5559 */
5560 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
5561
5562 /*
5563 * From "Requirements for Implementing the Microsoft
5564 * Hypervisor Interface":
5565 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
5566 *
5567 * "Starting with Windows Server 2012 and Windows 8, if
5568 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
5569 * the hypervisor imposes no specific limit to the number of VPs.
5570 * In this case, Windows Server 2012 guest VMs may use more than
5571 * 64 VPs, up to the maximum supported number of processors applicable
5572 * to the specific Windows version being used."
5573 */
5574 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
5575 DEFINE_PROP_END_OF_LIST()
5576};
5577
5578static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
5579{
5580 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5581 CPUClass *cc = CPU_CLASS(oc);
5582 DeviceClass *dc = DEVICE_CLASS(oc);
5583
5584 device_class_set_parent_realize(dc, x86_cpu_realizefn,
5585 &xcc->parent_realize);
5586 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
5587 &xcc->parent_unrealize);
5588 dc->props = x86_cpu_properties;
5589
5590 xcc->parent_reset = cc->reset;
5591 cc->reset = x86_cpu_reset;
5592 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
5593
5594 cc->class_by_name = x86_cpu_class_by_name;
5595 cc->parse_features = x86_cpu_parse_featurestr;
5596 cc->has_work = x86_cpu_has_work;
5597#ifdef CONFIG_TCG
5598 cc->do_interrupt = x86_cpu_do_interrupt;
5599 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
5600#endif
5601 cc->dump_state = x86_cpu_dump_state;
5602 cc->get_crash_info = x86_cpu_get_crash_info;
5603 cc->set_pc = x86_cpu_set_pc;
5604 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
5605 cc->gdb_read_register = x86_cpu_gdb_read_register;
5606 cc->gdb_write_register = x86_cpu_gdb_write_register;
5607 cc->get_arch_id = x86_cpu_get_arch_id;
5608 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
5609#ifdef CONFIG_USER_ONLY
5610 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
5611#else
5612 cc->asidx_from_attrs = x86_asidx_from_attrs;
5613 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
5614 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
5615 cc->write_elf64_note = x86_cpu_write_elf64_note;
5616 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
5617 cc->write_elf32_note = x86_cpu_write_elf32_note;
5618 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
5619 cc->vmsd = &vmstate_x86_cpu;
5620#endif
5621 cc->gdb_arch_name = x86_gdb_arch_name;
5622#ifdef TARGET_X86_64
5623 cc->gdb_core_xml_file = "i386-64bit.xml";
5624 cc->gdb_num_core_regs = 57;
5625#else
5626 cc->gdb_core_xml_file = "i386-32bit.xml";
5627 cc->gdb_num_core_regs = 41;
5628#endif
5629#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
5630 cc->debug_excp_handler = breakpoint_handler;
5631#endif
5632 cc->cpu_exec_enter = x86_cpu_exec_enter;
5633 cc->cpu_exec_exit = x86_cpu_exec_exit;
5634#ifdef CONFIG_TCG
5635 cc->tcg_initialize = tcg_x86_init;
5636#endif
5637 cc->disas_set_info = x86_disas_set_info;
5638
5639 dc->user_creatable = true;
5640}
5641
5642static const TypeInfo x86_cpu_type_info = {
5643 .name = TYPE_X86_CPU,
5644 .parent = TYPE_CPU,
5645 .instance_size = sizeof(X86CPU),
5646 .instance_init = x86_cpu_initfn,
5647 .abstract = true,
5648 .class_size = sizeof(X86CPUClass),
5649 .class_init = x86_cpu_common_class_init,
5650};
5651
5652
5653/* "base" CPU model, used by query-cpu-model-expansion */
5654static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
5655{
5656 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5657
5658 xcc->static_model = true;
5659 xcc->migration_safe = true;
5660 xcc->model_description = "base CPU model type with no features enabled";
5661 xcc->ordering = 8;
5662}
5663
5664static const TypeInfo x86_base_cpu_type_info = {
5665 .name = X86_CPU_TYPE_NAME("base"),
5666 .parent = TYPE_X86_CPU,
5667 .class_init = x86_cpu_base_class_init,
5668};
5669
5670static void x86_cpu_register_types(void)
5671{
5672 int i;
5673
5674 type_register_static(&x86_cpu_type_info);
5675 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5676 x86_register_cpudef_type(&builtin_x86_defs[i]);
5677 }
5678 type_register_static(&max_x86_cpu_type_info);
5679 type_register_static(&x86_base_cpu_type_info);
5680#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5681 type_register_static(&host_x86_cpu_type_info);
5682#endif
5683}
5684
5685type_init(x86_cpu_register_types)