1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24 #include "qemu/qemu-print.h"
25
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/reset.h"
30 #include "sysemu/hvf.h"
31 #include "sysemu/cpus.h"
32 #include "kvm_i386.h"
33 #include "sev_i386.h"
34
35 #include "qemu/error-report.h"
36 #include "qemu/module.h"
37 #include "qemu/option.h"
38 #include "qemu/config-file.h"
39 #include "qapi/error.h"
40 #include "qapi/qapi-visit-machine.h"
41 #include "qapi/qapi-visit-run-state.h"
42 #include "qapi/qmp/qdict.h"
43 #include "qapi/qmp/qerror.h"
44 #include "qapi/visitor.h"
45 #include "qom/qom-qobject.h"
46 #include "sysemu/arch_init.h"
47 #include "qapi/qapi-commands-machine-target.h"
48
49 #include "standard-headers/asm-x86/kvm_para.h"
50
51 #include "sysemu/sysemu.h"
52 #include "sysemu/tcg.h"
53 #include "hw/qdev-properties.h"
54 #include "hw/i386/topology.h"
55 #ifndef CONFIG_USER_ONLY
56 #include "exec/address-spaces.h"
57 #include "hw/xen/xen.h"
58 #include "hw/i386/apic_internal.h"
59 #include "hw/boards.h"
60 #endif
61
62 #include "disas/capstone.h"
63
64 /* Helpers for building CPUID[2] descriptors: */
65
66 struct CPUID2CacheDescriptorInfo {
67 enum CacheType type;
68 int level;
69 int size;
70 int line_size;
71 int associativity;
72 };
73
74 /*
75 * Known CPUID 2 cache descriptors.
76 * From Intel SDM Volume 2A, CPUID instruction
77 */
78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
80 .associativity = 4, .line_size = 32, },
81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
82 .associativity = 4, .line_size = 32, },
83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
84 .associativity = 4, .line_size = 64, },
85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
86 .associativity = 2, .line_size = 32, },
87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
88 .associativity = 4, .line_size = 32, },
89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
90 .associativity = 4, .line_size = 64, },
91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
92 .associativity = 6, .line_size = 64, },
93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
94 .associativity = 2, .line_size = 64, },
95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
96 .associativity = 8, .line_size = 64, },
97 /* lines per sector is not supported by cpuid2_cache_descriptor(),
98 * so descriptors 0x22, 0x23 are not included
99 */
100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
101 .associativity = 16, .line_size = 64, },
102 /* lines per sector is not supported by cpuid2_cache_descriptor(),
103 * so descriptors 0x25, 0x20 are not included
104 */
105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
106 .associativity = 8, .line_size = 64, },
107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
108 .associativity = 8, .line_size = 64, },
109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
112 .associativity = 4, .line_size = 32, },
113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
114 .associativity = 4, .line_size = 32, },
115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
116 .associativity = 4, .line_size = 32, },
117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
118 .associativity = 4, .line_size = 32, },
119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
120 .associativity = 4, .line_size = 64, },
121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
122 .associativity = 8, .line_size = 64, },
123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
124 .associativity = 12, .line_size = 64, },
125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
131 .associativity = 12, .line_size = 64, },
132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
133 .associativity = 16, .line_size = 64, },
134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
135 .associativity = 24, .line_size = 64, },
136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
137 .associativity = 8, .line_size = 64, },
138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
141 .associativity = 4, .line_size = 64, },
142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
143 .associativity = 4, .line_size = 64, },
144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
145 .associativity = 4, .line_size = 64, },
146 /* lines per sector is not supported by cpuid2_cache_descriptor(),
147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
148 */
149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
150 .associativity = 8, .line_size = 64, },
151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
152 .associativity = 2, .line_size = 64, },
153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 64, },
155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
156 .associativity = 8, .line_size = 32, },
157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
158 .associativity = 8, .line_size = 32, },
159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
160 .associativity = 8, .line_size = 32, },
161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
162 .associativity = 8, .line_size = 32, },
163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 8, .line_size = 64, },
167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 4, .line_size = 64, },
171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 4, .line_size = 64, },
173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
176 .associativity = 8, .line_size = 64, },
177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
178 .associativity = 8, .line_size = 64, },
179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
182 .associativity = 12, .line_size = 64, },
183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
184 .associativity = 12, .line_size = 64, },
185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
188 .associativity = 16, .line_size = 64, },
189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
190 .associativity = 16, .line_size = 64, },
191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
192 .associativity = 24, .line_size = 64, },
193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
194 .associativity = 24, .line_size = 64, },
195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
196 .associativity = 24, .line_size = 64, },
197 };
198
199 /*
200 * "CPUID leaf 2 does not report cache descriptor information,
201 * use CPUID leaf 4 to query cache parameters"
202 */
203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
204
205 /*
206 * Return a CPUID 2 cache descriptor for a given cache.
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
208 */
209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
210 {
211 int i;
212
213 assert(cache->size > 0);
214 assert(cache->level > 0);
215 assert(cache->line_size > 0);
216 assert(cache->associativity > 0);
217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
219 if (d->level == cache->level && d->type == cache->type &&
220 d->size == cache->size && d->line_size == cache->line_size &&
221 d->associativity == cache->associativity) {
222 return i;
223 }
224 }
225
226 return CACHE_DESCRIPTOR_UNAVAILABLE;
227 }
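
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * looking up the CPUID leaf 2 descriptor for a 32 KiB, 8-way, 64-byte-line
 * L1 data cache.  Given the table above, this geometry matches entry 0x2C;
 * an unknown geometry would yield CACHE_DESCRIPTOR_UNAVAILABLE (0xFF).
 * The helper name is hypothetical.
 */
static inline uint8_t example_l1d_descriptor(void)
{
    CPUCacheInfo l1d = {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
    };
    return cpuid2_cache_descriptor(&l1d); /* 0x2C for this geometry */
}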
228
229 /* CPUID Leaf 4 constants: */
230
231 /* EAX: */
232 #define CACHE_TYPE_D 1
233 #define CACHE_TYPE_I 2
234 #define CACHE_TYPE_UNIFIED 3
235
236 #define CACHE_LEVEL(l) (l << 5)
237
238 #define CACHE_SELF_INIT_LEVEL (1 << 8)
239
240 /* EDX: */
241 #define CACHE_NO_INVD_SHARING (1 << 0)
242 #define CACHE_INCLUSIVE (1 << 1)
243 #define CACHE_COMPLEX_IDX (1 << 2)
244
245 /* Encode CacheType for CPUID[4].EAX */
246 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
247 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
248 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
249 0 /* Invalid value */)
250
251
252 /* Encode cache info for CPUID[4] */
253 static void encode_cache_cpuid4(CPUCacheInfo *cache,
254 int num_apic_ids, int num_cores,
255 uint32_t *eax, uint32_t *ebx,
256 uint32_t *ecx, uint32_t *edx)
257 {
258 assert(cache->size == cache->line_size * cache->associativity *
259 cache->partitions * cache->sets);
260
261 assert(num_apic_ids > 0);
262 *eax = CACHE_TYPE(cache->type) |
263 CACHE_LEVEL(cache->level) |
264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
265 ((num_cores - 1) << 26) |
266 ((num_apic_ids - 1) << 14);
267
268 assert(cache->line_size > 0);
269 assert(cache->partitions > 0);
270 assert(cache->associativity > 0);
271 /* We don't implement fully-associative caches */
272 assert(cache->associativity < cache->sets);
273 *ebx = (cache->line_size - 1) |
274 ((cache->partitions - 1) << 12) |
275 ((cache->associativity - 1) << 22);
276
277 assert(cache->sets > 0);
278 *ecx = cache->sets - 1;
279
280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
281 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
283 }
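
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the inverse of encode_cache_cpuid4(), i.e. how a guest recovers the cache
 * size from the EBX/ECX values encoded above.  Field widths follow the
 * Intel SDM definition of CPUID leaf 4; the helper name is hypothetical.
 */
static inline uint64_t example_decode_cpuid4_size(uint32_t ebx, uint32_t ecx)
{
    uint32_t line_size     = (ebx & 0xfff) + 1;          /* EBX[11:0]  */
    uint32_t partitions    = ((ebx >> 12) & 0x3ff) + 1;  /* EBX[21:12] */
    uint32_t associativity = ((ebx >> 22) & 0x3ff) + 1;  /* EBX[31:22] */
    uint32_t sets          = ecx + 1;                    /* ECX[31:0]  */

    return (uint64_t)line_size * partitions * associativity * sets;
}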
284
285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
287 {
288 assert(cache->size % 1024 == 0);
289 assert(cache->lines_per_tag > 0);
290 assert(cache->associativity > 0);
291 assert(cache->line_size > 0);
292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
293 (cache->lines_per_tag << 8) | (cache->line_size);
294 }
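
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * packing a 64 KiB, 2-way, 64-byte-line L1 cache with one line per tag,
 * the geometry used for the legacy AMD L1 caches further down in this file.
 * The fields land at size<<24 | assoc<<16 | lines_per_tag<<8 | line_size,
 * which gives 0x40020140 here.  The helper name is hypothetical.
 */
static inline uint32_t example_cpuid80000005_l1d(void)
{
    CPUCacheInfo l1d = {
        .type = DATA_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 2,
        .lines_per_tag = 1,
    };
    return encode_cache_cpuid80000005(&l1d); /* == 0x40020140 */
}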
295
296 #define ASSOC_FULL 0xFF
297
298 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
299 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
300 a == 2 ? 0x2 : \
301 a == 4 ? 0x4 : \
302 a == 8 ? 0x6 : \
303 a == 16 ? 0x8 : \
304 a == 32 ? 0xA : \
305 a == 48 ? 0xB : \
306 a == 64 ? 0xC : \
307 a == 96 ? 0xD : \
308 a == 128 ? 0xE : \
309 a == ASSOC_FULL ? 0xF : \
310 0 /* invalid value */)
311
312 /*
313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
314 * @l3 can be NULL.
315 */
316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
317 CPUCacheInfo *l3,
318 uint32_t *ecx, uint32_t *edx)
319 {
320 assert(l2->size % 1024 == 0);
321 assert(l2->associativity > 0);
322 assert(l2->lines_per_tag > 0);
323 assert(l2->line_size > 0);
324 *ecx = ((l2->size / 1024) << 16) |
325 (AMD_ENC_ASSOC(l2->associativity) << 12) |
326 (l2->lines_per_tag << 8) | (l2->line_size);
327
328 if (l3) {
329 assert(l3->size % (512 * 1024) == 0);
330 assert(l3->associativity > 0);
331 assert(l3->lines_per_tag > 0);
332 assert(l3->line_size > 0);
333 *edx = ((l3->size / (512 * 1024)) << 18) |
334 (AMD_ENC_ASSOC(l3->associativity) << 12) |
335 (l3->lines_per_tag << 8) | (l3->line_size);
336 } else {
337 *edx = 0;
338 }
339 }
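
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * encoding a 512 KiB, 16-way L2 plus an 8 MiB, 16-way L3, both with 64-byte
 * lines and one line per tag (the EPYC-style geometry used later in this
 * file).  AMD_ENC_ASSOC(16) is 0x8, so this yields ECX = 0x02008140 and
 * EDX = 0x00408140.  The helper name is hypothetical.
 */
static inline void example_cpuid80000006(uint32_t *ecx, uint32_t *edx)
{
    CPUCacheInfo l2 = {
        .type = UNIFIED_CACHE, .level = 2, .size = 512 * KiB,
        .line_size = 64, .associativity = 16, .lines_per_tag = 1,
    };
    CPUCacheInfo l3 = {
        .type = UNIFIED_CACHE, .level = 3, .size = 8 * MiB,
        .line_size = 64, .associativity = 16, .lines_per_tag = 1,
    };
    encode_cache_cpuid80000006(&l2, &l3, ecx, edx);
}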
340
341 /*
342 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
343 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
344 * Define the constants to build the CPU topology. Right now, the TOPOEXT
345 * feature is enabled only on EPYC, so these constants are based on
346 * EPYC-supported configurations. We may need to handle the cases where
347 * these values change in the future.
348 */
349 /* Maximum core complexes in a node */
350 #define MAX_CCX 2
351 /* Maximum cores in a core complex */
352 #define MAX_CORES_IN_CCX 4
353 /* Maximum cores in a node */
354 #define MAX_CORES_IN_NODE 8
355 /* Maximum nodes in a socket */
356 #define MAX_NODES_PER_SOCKET 4
357
358 /*
359 * Figure out the number of nodes required to build this config.
360 * Max cores in a node is 8
361 */
362 static int nodes_in_socket(int nr_cores)
363 {
364 int nodes;
365
366 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
367
368 /* Hardware does not support a 3-node config; return 4 in that case */
369 return (nodes == 3) ? 4 : nodes;
370 }
371
372 /*
373 * Decide the number of cores in a core complex with the given nr_cores using
374 * the following constants: MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
375 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible.
376 * The L3 cache is shared across all cores in a core complex, so this also
377 * tells us how many cores are sharing the L3 cache.
378 */
379 static int cores_in_core_complex(int nr_cores)
380 {
381 int nodes;
382
383 /* Check if we can fit all the cores in one core complex */
384 if (nr_cores <= MAX_CORES_IN_CCX) {
385 return nr_cores;
386 }
387 /* Get the number of nodes required to build this config */
388 nodes = nodes_in_socket(nr_cores);
389
390 /*
391 * Divide the cores across all the core complexes.
392 * Return the rounded-up value.
393 */
394 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
395 }
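
/*
 * Worked example (editor's addition, not part of the original file):
 * with nr_cores = 24, nodes_in_socket() computes DIV_ROUND_UP(24, 8) = 3,
 * which is bumped to 4 because 3-node configs don't exist in hardware, and
 * cores_in_core_complex() then spreads the cores as DIV_ROUND_UP(24, 4 * 2)
 * = 3 cores per CCX.  The helper name is hypothetical.
 */
static inline void example_epyc_split(int *nodes, int *cores_per_ccx)
{
    *nodes = nodes_in_socket(24);               /* == 4 */
    *cores_per_ccx = cores_in_core_complex(24); /* == 3 */
}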
396
397 /* Encode cache info for CPUID[8000001D] */
398 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
399 uint32_t *eax, uint32_t *ebx,
400 uint32_t *ecx, uint32_t *edx)
401 {
402 uint32_t l3_cores;
403 assert(cache->size == cache->line_size * cache->associativity *
404 cache->partitions * cache->sets);
405
406 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
407 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
408
409 /* L3 is shared among multiple cores */
410 if (cache->level == 3) {
411 l3_cores = cores_in_core_complex(cs->nr_cores);
412 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
413 } else {
414 *eax |= ((cs->nr_threads - 1) << 14);
415 }
416
417 assert(cache->line_size > 0);
418 assert(cache->partitions > 0);
419 assert(cache->associativity > 0);
420 /* We don't implement fully-associative caches */
421 assert(cache->associativity < cache->sets);
422 *ebx = (cache->line_size - 1) |
423 ((cache->partitions - 1) << 12) |
424 ((cache->associativity - 1) << 22);
425
426 assert(cache->sets > 0);
427 *ecx = cache->sets - 1;
428
429 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
430 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
431 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
432 }
433
434 /* Data structure to hold the configuration info for a given core index */
435 struct core_topology {
436 /* core complex id of the current core index */
437 int ccx_id;
438 /*
439 * Adjusted core index for this core in the topology
440 * This can be 0,1,2,3 with max 4 cores in a core complex
441 */
442 int core_id;
443 /* Node id for this core index */
444 int node_id;
445 /* Number of nodes in this config */
446 int num_nodes;
447 };
448
449 /*
450 * Build a configuration that closely matches the EPYC hardware, using the EPYC
451 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
452 * for now. This could change in the future.
453 * nr_cores : Total number of cores in the config
454 * core_id : Core index of the current CPU
455 * topo : Data structure to hold all the config info for this core index
456 */
457 static void build_core_topology(int nr_cores, int core_id,
458 struct core_topology *topo)
459 {
460 int nodes, cores_in_ccx;
461
462 /* First get the number of nodes required */
463 nodes = nodes_in_socket(nr_cores);
464
465 cores_in_ccx = cores_in_core_complex(nr_cores);
466
467 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
468 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
469 topo->core_id = core_id % cores_in_ccx;
470 topo->num_nodes = nodes;
471 }
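
/*
 * Worked example (editor's addition, not part of the original file):
 * for a 16-core socket, core index 10 ends up on node 1 (10 / 8), in core
 * complex 0 ((10 % 8) / 4), as local core 2 (10 % 4), with num_nodes = 2.
 * The helper name is hypothetical.
 */
static inline void example_core_topology(struct core_topology *topo)
{
    build_core_topology(16, 10, topo);
    /* topo->node_id == 1, topo->ccx_id == 0, topo->core_id == 2 */
}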
472
473 /* Encode cache info for CPUID[8000001E] */
474 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
475 uint32_t *eax, uint32_t *ebx,
476 uint32_t *ecx, uint32_t *edx)
477 {
478 struct core_topology topo = {0};
479 unsigned long nodes;
480 int shift;
481
482 build_core_topology(cs->nr_cores, cpu->core_id, &topo);
483 *eax = cpu->apic_id;
484 /*
485 * CPUID_Fn8000001E_EBX
486 * 31:16 Reserved
487 * 15:8 Threads per core (The number of threads per core is
488 * Threads per core + 1)
489 * 7:0 Core id (see bit decoding below)
490 * SMT:
491 * 4:3 node id
492 * 2 Core complex id
493 * 1:0 Core id
494 * Non SMT:
495 * 5:4 node id
496 * 3 Core complex id
497 * 1:0 Core id
498 */
499 if (cs->nr_threads - 1) {
500 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
501 (topo.ccx_id << 2) | topo.core_id;
502 } else {
503 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
504 }
505 /*
506 * CPUID_Fn8000001E_ECX
507 * 31:11 Reserved
508 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
509 * 7:0 Node id (see bit decoding below)
510 * 2 Socket id
511 * 1:0 Node id
512 */
513 if (topo.num_nodes <= 4) {
514 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
515 topo.node_id;
516 } else {
517 /*
518 * Node id fix-up. Actual hardware supports up to 4 nodes, but with
519 * more than 32 cores we may end up with more than 4 nodes.
520 * The node id is a combination of socket id and node id. The only
521 * requirement here is that this number should be unique across the system.
522 * Shift the socket id to accommodate more nodes. We don't expect both
523 * socket id and node id to be big numbers at the same time. This is not
524 * an ideal config but we need to support it. The maximum number of nodes
525 * we can have is 32 (255/8) with 8 cores per node and 255 max cores, so we
526 * only need 5 bits for nodes. Find the leftmost set bit to represent the
527 * total number of nodes. find_last_bit returns the last set bit (0-based).
528 * Left-shift (+1) the socket id to represent all the nodes.
529 */
530 nodes = topo.num_nodes - 1;
531 shift = find_last_bit(&nodes, 8);
532 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
533 topo.node_id;
534 }
535 *edx = 0;
536 }
537
538 /*
539 * Definitions of the hardcoded cache entries we expose:
540 * These are legacy cache values. If there is a need to change any
541 * of these values please use builtin_x86_defs
542 */
543
544 /* L1 data cache: */
545 static CPUCacheInfo legacy_l1d_cache = {
546 .type = DATA_CACHE,
547 .level = 1,
548 .size = 32 * KiB,
549 .self_init = 1,
550 .line_size = 64,
551 .associativity = 8,
552 .sets = 64,
553 .partitions = 1,
554 .no_invd_sharing = true,
555 };
556
557 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
558 static CPUCacheInfo legacy_l1d_cache_amd = {
559 .type = DATA_CACHE,
560 .level = 1,
561 .size = 64 * KiB,
562 .self_init = 1,
563 .line_size = 64,
564 .associativity = 2,
565 .sets = 512,
566 .partitions = 1,
567 .lines_per_tag = 1,
568 .no_invd_sharing = true,
569 };
570
571 /* L1 instruction cache: */
572 static CPUCacheInfo legacy_l1i_cache = {
573 .type = INSTRUCTION_CACHE,
574 .level = 1,
575 .size = 32 * KiB,
576 .self_init = 1,
577 .line_size = 64,
578 .associativity = 8,
579 .sets = 64,
580 .partitions = 1,
581 .no_invd_sharing = true,
582 };
583
584 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
585 static CPUCacheInfo legacy_l1i_cache_amd = {
586 .type = INSTRUCTION_CACHE,
587 .level = 1,
588 .size = 64 * KiB,
589 .self_init = 1,
590 .line_size = 64,
591 .associativity = 2,
592 .sets = 512,
593 .partitions = 1,
594 .lines_per_tag = 1,
595 .no_invd_sharing = true,
596 };
597
598 /* Level 2 unified cache: */
599 static CPUCacheInfo legacy_l2_cache = {
600 .type = UNIFIED_CACHE,
601 .level = 2,
602 .size = 4 * MiB,
603 .self_init = 1,
604 .line_size = 64,
605 .associativity = 16,
606 .sets = 4096,
607 .partitions = 1,
608 .no_invd_sharing = true,
609 };
610
611 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
612 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
613 .type = UNIFIED_CACHE,
614 .level = 2,
615 .size = 2 * MiB,
616 .line_size = 64,
617 .associativity = 8,
618 };
619
620
621 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
622 static CPUCacheInfo legacy_l2_cache_amd = {
623 .type = UNIFIED_CACHE,
624 .level = 2,
625 .size = 512 * KiB,
626 .line_size = 64,
627 .lines_per_tag = 1,
628 .associativity = 16,
629 .sets = 512,
630 .partitions = 1,
631 };
632
633 /* Level 3 unified cache: */
634 static CPUCacheInfo legacy_l3_cache = {
635 .type = UNIFIED_CACHE,
636 .level = 3,
637 .size = 16 * MiB,
638 .line_size = 64,
639 .associativity = 16,
640 .sets = 16384,
641 .partitions = 1,
642 .lines_per_tag = 1,
643 .self_init = true,
644 .inclusive = true,
645 .complex_indexing = true,
646 };
647
648 /* TLB definitions: */
649
650 #define L1_DTLB_2M_ASSOC 1
651 #define L1_DTLB_2M_ENTRIES 255
652 #define L1_DTLB_4K_ASSOC 1
653 #define L1_DTLB_4K_ENTRIES 255
654
655 #define L1_ITLB_2M_ASSOC 1
656 #define L1_ITLB_2M_ENTRIES 255
657 #define L1_ITLB_4K_ASSOC 1
658 #define L1_ITLB_4K_ENTRIES 255
659
660 #define L2_DTLB_2M_ASSOC 0 /* disabled */
661 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
662 #define L2_DTLB_4K_ASSOC 4
663 #define L2_DTLB_4K_ENTRIES 512
664
665 #define L2_ITLB_2M_ASSOC 0 /* disabled */
666 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
667 #define L2_ITLB_4K_ASSOC 4
668 #define L2_ITLB_4K_ENTRIES 512
669
670 /* CPUID Leaf 0x14 constants: */
671 #define INTEL_PT_MAX_SUBLEAF 0x1
672 /*
673 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
674 * MSR can be accessed;
675 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
676 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
677 * of Intel PT MSRs across warm reset;
678 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
679 */
680 #define INTEL_PT_MINIMAL_EBX 0xf
681 /*
682 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
683 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
684 * accessed;
685 * bit[01]: ToPA tables can hold any number of output entries, up to the
686 * maximum allowed by the MaskOrTableOffset field of
687 * IA32_RTIT_OUTPUT_MASK_PTRS;
688 * bit[02]: Support Single-Range Output scheme;
689 */
690 #define INTEL_PT_MINIMAL_ECX 0x7
691 /* generated packets which contain IP payloads have LIP values */
692 #define INTEL_PT_IP_LIP (1 << 31)
693 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
694 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
695 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
696 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
697 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
698
699 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
700 uint32_t vendor2, uint32_t vendor3)
701 {
702 int i;
703 for (i = 0; i < 4; i++) {
704 dst[i] = vendor1 >> (8 * i);
705 dst[i + 4] = vendor2 >> (8 * i);
706 dst[i + 8] = vendor3 >> (8 * i);
707 }
708 dst[CPUID_VENDOR_SZ] = '\0';
709 }
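
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the little-endian register-to-string conversion above turns the classic
 * CPUID.0 values EBX=0x756E6547, EDX=0x49656E69, ECX=0x6C65746E into
 * "GenuineIntel".  Note the EBX/EDX/ECX argument order; the helper name is
 * hypothetical.
 */
static inline void example_vendor_string(char *dst /* CPUID_VENDOR_SZ + 1 */)
{
    x86_cpu_vendor_words2str(dst, 0x756e6547, 0x49656e69, 0x6c65746e);
    /* dst now holds "GenuineIntel" */
}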
710
711 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
712 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
713 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
714 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
715 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
716 CPUID_PSE36 | CPUID_FXSR)
717 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
718 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
719 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
720 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
721 CPUID_PAE | CPUID_SEP | CPUID_APIC)
722
723 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
724 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
725 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
726 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
727 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
728 /* partly implemented:
729 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
730 /* missing:
731 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
732 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
733 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
734 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
735 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
736 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
737 CPUID_EXT_RDRAND)
738 /* missing:
739 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
740 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
741 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
742 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
743 CPUID_EXT_F16C */
744
745 #ifdef TARGET_X86_64
746 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
747 #else
748 #define TCG_EXT2_X86_64_FEATURES 0
749 #endif
750
751 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
752 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
753 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
754 TCG_EXT2_X86_64_FEATURES)
755 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
756 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
757 #define TCG_EXT4_FEATURES 0
758 #define TCG_SVM_FEATURES CPUID_SVM_NPT
759 #define TCG_KVM_FEATURES 0
760 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
761 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
762 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
763 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
764 CPUID_7_0_EBX_ERMS)
765 /* missing:
766 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
767 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
768 CPUID_7_0_EBX_RDSEED */
769 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
770 /* CPUID_7_0_ECX_OSPKE is dynamic */ \
771 CPUID_7_0_ECX_LA57)
772 #define TCG_7_0_EDX_FEATURES 0
773 #define TCG_7_1_EAX_FEATURES 0
774 #define TCG_APM_FEATURES 0
775 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
776 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
777 /* missing:
778 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
779
780 typedef enum FeatureWordType {
781 CPUID_FEATURE_WORD,
782 MSR_FEATURE_WORD,
783 } FeatureWordType;
784
785 typedef struct FeatureWordInfo {
786 FeatureWordType type;
787 /* Feature flag names are taken from "Intel Processor Identification and
788 * the CPUID Instruction" and AMD's "CPUID Specification".
789 * In cases of disagreement between feature naming conventions,
790 * aliases may be added.
791 */
792 const char *feat_names[64];
793 union {
794 /* If type==CPUID_FEATURE_WORD */
795 struct {
796 uint32_t eax; /* Input EAX for CPUID */
797 bool needs_ecx; /* CPUID instruction uses ECX as input */
798 uint32_t ecx; /* Input ECX value for CPUID */
799 int reg; /* output register (R_* constant) */
800 } cpuid;
801 /* If type==MSR_FEATURE_WORD */
802 struct {
803 uint32_t index;
804 } msr;
805 };
806 uint64_t tcg_features; /* Feature flags supported by TCG */
807 uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */
808 uint64_t migratable_flags; /* Feature flags known to be migratable */
809 /* Features that shouldn't be auto-enabled by "-cpu host" */
810 uint64_t no_autoenable_flags;
811 } FeatureWordInfo;
812
813 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
814 [FEAT_1_EDX] = {
815 .type = CPUID_FEATURE_WORD,
816 .feat_names = {
817 "fpu", "vme", "de", "pse",
818 "tsc", "msr", "pae", "mce",
819 "cx8", "apic", NULL, "sep",
820 "mtrr", "pge", "mca", "cmov",
821 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
822 NULL, "ds" /* Intel dts */, "acpi", "mmx",
823 "fxsr", "sse", "sse2", "ss",
824 "ht" /* Intel htt */, "tm", "ia64", "pbe",
825 },
826 .cpuid = {.eax = 1, .reg = R_EDX, },
827 .tcg_features = TCG_FEATURES,
828 },
829 [FEAT_1_ECX] = {
830 .type = CPUID_FEATURE_WORD,
831 .feat_names = {
832 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
833 "ds-cpl", "vmx", "smx", "est",
834 "tm2", "ssse3", "cid", NULL,
835 "fma", "cx16", "xtpr", "pdcm",
836 NULL, "pcid", "dca", "sse4.1",
837 "sse4.2", "x2apic", "movbe", "popcnt",
838 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
839 "avx", "f16c", "rdrand", "hypervisor",
840 },
841 .cpuid = { .eax = 1, .reg = R_ECX, },
842 .tcg_features = TCG_EXT_FEATURES,
843 },
844 /* Feature names that are already defined in feature_name[] but
845 * are set in CPUID[8000_0001].EDX on AMD CPUs don't have their
846 * names listed in feat_names below. They are copied automatically
847 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
848 */
849 [FEAT_8000_0001_EDX] = {
850 .type = CPUID_FEATURE_WORD,
851 .feat_names = {
852 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
853 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
854 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
855 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
856 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
857 "nx", NULL, "mmxext", NULL /* mmx */,
858 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
859 NULL, "lm", "3dnowext", "3dnow",
860 },
861 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
862 .tcg_features = TCG_EXT2_FEATURES,
863 },
864 [FEAT_8000_0001_ECX] = {
865 .type = CPUID_FEATURE_WORD,
866 .feat_names = {
867 "lahf-lm", "cmp-legacy", "svm", "extapic",
868 "cr8legacy", "abm", "sse4a", "misalignsse",
869 "3dnowprefetch", "osvw", "ibs", "xop",
870 "skinit", "wdt", NULL, "lwp",
871 "fma4", "tce", NULL, "nodeid-msr",
872 NULL, "tbm", "topoext", "perfctr-core",
873 "perfctr-nb", NULL, NULL, NULL,
874 NULL, NULL, NULL, NULL,
875 },
876 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
877 .tcg_features = TCG_EXT3_FEATURES,
878 /*
879 * TOPOEXT is always allowed but can't be enabled blindly by
880 * "-cpu host", as it requires consistent cache topology info
881 * to be provided so it doesn't confuse guests.
882 */
883 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
884 },
885 [FEAT_C000_0001_EDX] = {
886 .type = CPUID_FEATURE_WORD,
887 .feat_names = {
888 NULL, NULL, "xstore", "xstore-en",
889 NULL, NULL, "xcrypt", "xcrypt-en",
890 "ace2", "ace2-en", "phe", "phe-en",
891 "pmm", "pmm-en", NULL, NULL,
892 NULL, NULL, NULL, NULL,
893 NULL, NULL, NULL, NULL,
894 NULL, NULL, NULL, NULL,
895 NULL, NULL, NULL, NULL,
896 },
897 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
898 .tcg_features = TCG_EXT4_FEATURES,
899 },
900 [FEAT_KVM] = {
901 .type = CPUID_FEATURE_WORD,
902 .feat_names = {
903 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
904 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
905 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
906 "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL,
907 NULL, NULL, NULL, NULL,
908 NULL, NULL, NULL, NULL,
909 "kvmclock-stable-bit", NULL, NULL, NULL,
910 NULL, NULL, NULL, NULL,
911 },
912 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
913 .tcg_features = TCG_KVM_FEATURES,
914 },
915 [FEAT_KVM_HINTS] = {
916 .type = CPUID_FEATURE_WORD,
917 .feat_names = {
918 "kvm-hint-dedicated", NULL, NULL, NULL,
919 NULL, NULL, NULL, NULL,
920 NULL, NULL, NULL, NULL,
921 NULL, NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 NULL, NULL, NULL, NULL,
924 NULL, NULL, NULL, NULL,
925 NULL, NULL, NULL, NULL,
926 },
927 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
928 .tcg_features = TCG_KVM_FEATURES,
929 /*
930 * KVM hints aren't auto-enabled by -cpu host; they need to be
931 * explicitly enabled on the command line.
932 */
933 .no_autoenable_flags = ~0U,
934 },
935 /*
936 * .feat_names are commented out for Hyper-V enlightenments because we
937 * don't want to have two different ways of enabling them on the QEMU command
938 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
939 * enabling several feature bits simultaneously; exposing these bits
940 * individually may just confuse guests.
941 */
942 [FEAT_HYPERV_EAX] = {
943 .type = CPUID_FEATURE_WORD,
944 .feat_names = {
945 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
946 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
947 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
948 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
949 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
950 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
951 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
952 NULL, NULL,
953 NULL, NULL, NULL, NULL,
954 NULL, NULL, NULL, NULL,
955 NULL, NULL, NULL, NULL,
956 NULL, NULL, NULL, NULL,
957 },
958 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
959 },
960 [FEAT_HYPERV_EBX] = {
961 .type = CPUID_FEATURE_WORD,
962 .feat_names = {
963 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
964 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
965 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
966 NULL /* hv_create_port */, NULL /* hv_connect_port */,
967 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
968 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
969 NULL, NULL,
970 NULL, NULL, NULL, NULL,
971 NULL, NULL, NULL, NULL,
972 NULL, NULL, NULL, NULL,
973 NULL, NULL, NULL, NULL,
974 },
975 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
976 },
977 [FEAT_HYPERV_EDX] = {
978 .type = CPUID_FEATURE_WORD,
979 .feat_names = {
980 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
981 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
982 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
983 NULL, NULL,
984 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
985 NULL, NULL, NULL, NULL,
986 NULL, NULL, NULL, NULL,
987 NULL, NULL, NULL, NULL,
988 NULL, NULL, NULL, NULL,
989 NULL, NULL, NULL, NULL,
990 },
991 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
992 },
993 [FEAT_HV_RECOMM_EAX] = {
994 .type = CPUID_FEATURE_WORD,
995 .feat_names = {
996 NULL /* hv_recommend_pv_as_switch */,
997 NULL /* hv_recommend_pv_tlbflush_local */,
998 NULL /* hv_recommend_pv_tlbflush_remote */,
999 NULL /* hv_recommend_msr_apic_access */,
1000 NULL /* hv_recommend_msr_reset */,
1001 NULL /* hv_recommend_relaxed_timing */,
1002 NULL /* hv_recommend_dma_remapping */,
1003 NULL /* hv_recommend_int_remapping */,
1004 NULL /* hv_recommend_x2apic_msrs */,
1005 NULL /* hv_recommend_autoeoi_deprecation */,
1006 NULL /* hv_recommend_pv_ipi */,
1007 NULL /* hv_recommend_ex_hypercalls */,
1008 NULL /* hv_hypervisor_is_nested */,
1009 NULL /* hv_recommend_int_mbec */,
1010 NULL /* hv_recommend_evmcs */,
1011 NULL,
1012 NULL, NULL, NULL, NULL,
1013 NULL, NULL, NULL, NULL,
1014 NULL, NULL, NULL, NULL,
1015 NULL, NULL, NULL, NULL,
1016 },
1017 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
1018 },
1019 [FEAT_HV_NESTED_EAX] = {
1020 .type = CPUID_FEATURE_WORD,
1021 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
1022 },
1023 [FEAT_SVM] = {
1024 .type = CPUID_FEATURE_WORD,
1025 .feat_names = {
1026 "npt", "lbrv", "svm-lock", "nrip-save",
1027 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1028 NULL, NULL, "pause-filter", NULL,
1029 "pfthreshold", NULL, NULL, NULL,
1030 NULL, NULL, NULL, NULL,
1031 NULL, NULL, NULL, NULL,
1032 NULL, NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL,
1034 },
1035 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
1036 .tcg_features = TCG_SVM_FEATURES,
1037 },
1038 [FEAT_7_0_EBX] = {
1039 .type = CPUID_FEATURE_WORD,
1040 .feat_names = {
1041 "fsgsbase", "tsc-adjust", NULL, "bmi1",
1042 "hle", "avx2", NULL, "smep",
1043 "bmi2", "erms", "invpcid", "rtm",
1044 NULL, NULL, "mpx", NULL,
1045 "avx512f", "avx512dq", "rdseed", "adx",
1046 "smap", "avx512ifma", "pcommit", "clflushopt",
1047 "clwb", "intel-pt", "avx512pf", "avx512er",
1048 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1049 },
1050 .cpuid = {
1051 .eax = 7,
1052 .needs_ecx = true, .ecx = 0,
1053 .reg = R_EBX,
1054 },
1055 .tcg_features = TCG_7_0_EBX_FEATURES,
1056 },
1057 [FEAT_7_0_ECX] = {
1058 .type = CPUID_FEATURE_WORD,
1059 .feat_names = {
1060 NULL, "avx512vbmi", "umip", "pku",
1061 NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL,
1062 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1063 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
1064 "la57", NULL, NULL, NULL,
1065 NULL, NULL, "rdpid", NULL,
1066 NULL, "cldemote", NULL, "movdiri",
1067 "movdir64b", NULL, NULL, NULL,
1068 },
1069 .cpuid = {
1070 .eax = 7,
1071 .needs_ecx = true, .ecx = 0,
1072 .reg = R_ECX,
1073 },
1074 .tcg_features = TCG_7_0_ECX_FEATURES,
1075 },
1076 [FEAT_7_0_EDX] = {
1077 .type = CPUID_FEATURE_WORD,
1078 .feat_names = {
1079 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1080 NULL, NULL, NULL, NULL,
1081 NULL, NULL, "md-clear", NULL,
1082 NULL, NULL, NULL, NULL,
1083 NULL, NULL, NULL /* pconfig */, NULL,
1084 NULL, NULL, NULL, NULL,
1085 NULL, NULL, "spec-ctrl", "stibp",
1086 NULL, "arch-capabilities", "core-capability", "ssbd",
1087 },
1088 .cpuid = {
1089 .eax = 7,
1090 .needs_ecx = true, .ecx = 0,
1091 .reg = R_EDX,
1092 },
1093 .tcg_features = TCG_7_0_EDX_FEATURES,
1094 },
1095 [FEAT_7_1_EAX] = {
1096 .type = CPUID_FEATURE_WORD,
1097 .feat_names = {
1098 NULL, NULL, NULL, NULL,
1099 NULL, "avx512-bf16", NULL, NULL,
1100 NULL, NULL, NULL, NULL,
1101 NULL, NULL, NULL, NULL,
1102 NULL, NULL, NULL, NULL,
1103 NULL, NULL, NULL, NULL,
1104 NULL, NULL, NULL, NULL,
1105 NULL, NULL, NULL, NULL,
1106 },
1107 .cpuid = {
1108 .eax = 7,
1109 .needs_ecx = true, .ecx = 1,
1110 .reg = R_EAX,
1111 },
1112 .tcg_features = TCG_7_1_EAX_FEATURES,
1113 },
1114 [FEAT_8000_0007_EDX] = {
1115 .type = CPUID_FEATURE_WORD,
1116 .feat_names = {
1117 NULL, NULL, NULL, NULL,
1118 NULL, NULL, NULL, NULL,
1119 "invtsc", NULL, NULL, NULL,
1120 NULL, NULL, NULL, NULL,
1121 NULL, NULL, NULL, NULL,
1122 NULL, NULL, NULL, NULL,
1123 NULL, NULL, NULL, NULL,
1124 NULL, NULL, NULL, NULL,
1125 },
1126 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1127 .tcg_features = TCG_APM_FEATURES,
1128 .unmigratable_flags = CPUID_APM_INVTSC,
1129 },
1130 [FEAT_8000_0008_EBX] = {
1131 .type = CPUID_FEATURE_WORD,
1132 .feat_names = {
1133 "clzero", NULL, "xsaveerptr", NULL,
1134 NULL, NULL, NULL, NULL,
1135 NULL, "wbnoinvd", NULL, NULL,
1136 "ibpb", NULL, NULL, NULL,
1137 NULL, NULL, NULL, NULL,
1138 NULL, NULL, NULL, NULL,
1139 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1140 NULL, NULL, NULL, NULL,
1141 },
1142 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1143 .tcg_features = 0,
1144 .unmigratable_flags = 0,
1145 },
1146 [FEAT_XSAVE] = {
1147 .type = CPUID_FEATURE_WORD,
1148 .feat_names = {
1149 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1150 NULL, NULL, NULL, NULL,
1151 NULL, NULL, NULL, NULL,
1152 NULL, NULL, NULL, NULL,
1153 NULL, NULL, NULL, NULL,
1154 NULL, NULL, NULL, NULL,
1155 NULL, NULL, NULL, NULL,
1156 NULL, NULL, NULL, NULL,
1157 },
1158 .cpuid = {
1159 .eax = 0xd,
1160 .needs_ecx = true, .ecx = 1,
1161 .reg = R_EAX,
1162 },
1163 .tcg_features = TCG_XSAVE_FEATURES,
1164 },
1165 [FEAT_6_EAX] = {
1166 .type = CPUID_FEATURE_WORD,
1167 .feat_names = {
1168 NULL, NULL, "arat", NULL,
1169 NULL, NULL, NULL, NULL,
1170 NULL, NULL, NULL, NULL,
1171 NULL, NULL, NULL, NULL,
1172 NULL, NULL, NULL, NULL,
1173 NULL, NULL, NULL, NULL,
1174 NULL, NULL, NULL, NULL,
1175 NULL, NULL, NULL, NULL,
1176 },
1177 .cpuid = { .eax = 6, .reg = R_EAX, },
1178 .tcg_features = TCG_6_EAX_FEATURES,
1179 },
1180 [FEAT_XSAVE_COMP_LO] = {
1181 .type = CPUID_FEATURE_WORD,
1182 .cpuid = {
1183 .eax = 0xD,
1184 .needs_ecx = true, .ecx = 0,
1185 .reg = R_EAX,
1186 },
1187 .tcg_features = ~0U,
1188 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1189 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1190 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1191 XSTATE_PKRU_MASK,
1192 },
1193 [FEAT_XSAVE_COMP_HI] = {
1194 .type = CPUID_FEATURE_WORD,
1195 .cpuid = {
1196 .eax = 0xD,
1197 .needs_ecx = true, .ecx = 0,
1198 .reg = R_EDX,
1199 },
1200 .tcg_features = ~0U,
1201 },
1202 /* Below are MSR-exposed features */
1203 [FEAT_ARCH_CAPABILITIES] = {
1204 .type = MSR_FEATURE_WORD,
1205 .feat_names = {
1206 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1207 "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl",
1208 "taa-no", NULL, NULL, NULL,
1209 NULL, NULL, NULL, NULL,
1210 NULL, NULL, NULL, NULL,
1211 NULL, NULL, NULL, NULL,
1212 NULL, NULL, NULL, NULL,
1213 NULL, NULL, NULL, NULL,
1214 },
1215 .msr = {
1216 .index = MSR_IA32_ARCH_CAPABILITIES,
1217 },
1218 },
1219 [FEAT_CORE_CAPABILITY] = {
1220 .type = MSR_FEATURE_WORD,
1221 .feat_names = {
1222 NULL, NULL, NULL, NULL,
1223 NULL, "split-lock-detect", NULL, NULL,
1224 NULL, NULL, NULL, NULL,
1225 NULL, NULL, NULL, NULL,
1226 NULL, NULL, NULL, NULL,
1227 NULL, NULL, NULL, NULL,
1228 NULL, NULL, NULL, NULL,
1229 NULL, NULL, NULL, NULL,
1230 },
1231 .msr = {
1232 .index = MSR_IA32_CORE_CAPABILITY,
1233 },
1234 },
1235
1236 [FEAT_VMX_PROCBASED_CTLS] = {
1237 .type = MSR_FEATURE_WORD,
1238 .feat_names = {
1239 NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset",
1240 NULL, NULL, NULL, "vmx-hlt-exit",
1241 NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit",
1242 "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit",
1243 "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit",
1244 "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit",
1245 "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf",
1246 "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls",
1247 },
1248 .msr = {
1249 .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
1250 }
1251 },
1252
1253 [FEAT_VMX_SECONDARY_CTLS] = {
1254 .type = MSR_FEATURE_WORD,
1255 .feat_names = {
1256 "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit",
1257 "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest",
1258 "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit",
1259 "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit",
1260 "vmx-rdseed-exit", "vmx-pml", NULL, NULL,
1261 "vmx-xsaves", NULL, NULL, NULL,
1262 NULL, NULL, NULL, NULL,
1263 NULL, NULL, NULL, NULL,
1264 },
1265 .msr = {
1266 .index = MSR_IA32_VMX_PROCBASED_CTLS2,
1267 }
1268 },
1269
1270 [FEAT_VMX_PINBASED_CTLS] = {
1271 .type = MSR_FEATURE_WORD,
1272 .feat_names = {
1273 "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit",
1274 NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr",
1275 NULL, NULL, NULL, NULL,
1276 NULL, NULL, NULL, NULL,
1277 NULL, NULL, NULL, NULL,
1278 NULL, NULL, NULL, NULL,
1279 NULL, NULL, NULL, NULL,
1280 NULL, NULL, NULL, NULL,
1281 },
1282 .msr = {
1283 .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS,
1284 }
1285 },
1286
1287 [FEAT_VMX_EXIT_CTLS] = {
1288 .type = MSR_FEATURE_WORD,
1289 /*
1290 * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from
1291 * the LM CPUID bit.
1292 */
1293 .feat_names = {
1294 NULL, NULL, "vmx-exit-nosave-debugctl", NULL,
1295 NULL, NULL, NULL, NULL,
1296 NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL,
1297 "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr",
1298 NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat",
1299 "vmx-exit-save-efer", "vmx-exit-load-efer",
1300 "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs",
1301 NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL,
1302 NULL, NULL, NULL, NULL,
1303 },
1304 .msr = {
1305 .index = MSR_IA32_VMX_TRUE_EXIT_CTLS,
1306 }
1307 },
1308
1309 [FEAT_VMX_ENTRY_CTLS] = {
1310 .type = MSR_FEATURE_WORD,
1311 .feat_names = {
1312 NULL, NULL, "vmx-entry-noload-debugctl", NULL,
1313 NULL, NULL, NULL, NULL,
1314 NULL, "vmx-entry-ia32e-mode", NULL, NULL,
1315 NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer",
1316 "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL,
1317 NULL, NULL, NULL, NULL,
1318 NULL, NULL, NULL, NULL,
1319 NULL, NULL, NULL, NULL,
1320 },
1321 .msr = {
1322 .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS,
1323 }
1324 },
1325
1326 [FEAT_VMX_MISC] = {
1327 .type = MSR_FEATURE_WORD,
1328 .feat_names = {
1329 NULL, NULL, NULL, NULL,
1330 NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown",
1331 "vmx-activity-wait-sipi", NULL, NULL, NULL,
1332 NULL, NULL, NULL, NULL,
1333 NULL, NULL, NULL, NULL,
1334 NULL, NULL, NULL, NULL,
1335 NULL, NULL, NULL, NULL,
1336 NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL,
1337 },
1338 .msr = {
1339 .index = MSR_IA32_VMX_MISC,
1340 }
1341 },
1342
1343 [FEAT_VMX_EPT_VPID_CAPS] = {
1344 .type = MSR_FEATURE_WORD,
1345 .feat_names = {
1346 "vmx-ept-execonly", NULL, NULL, NULL,
1347 NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5",
1348 NULL, NULL, NULL, NULL,
1349 NULL, NULL, NULL, NULL,
1350 "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL,
1351 "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL,
1352 NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL,
1353 NULL, NULL, NULL, NULL,
1354 "vmx-invvpid", NULL, NULL, NULL,
1355 NULL, NULL, NULL, NULL,
1356 "vmx-invvpid-single-addr", "vmx-invept-single-context",
1357 "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals",
1358 NULL, NULL, NULL, NULL,
1359 NULL, NULL, NULL, NULL,
1360 NULL, NULL, NULL, NULL,
1361 NULL, NULL, NULL, NULL,
1362 NULL, NULL, NULL, NULL,
1363 },
1364 .msr = {
1365 .index = MSR_IA32_VMX_EPT_VPID_CAP,
1366 }
1367 },
1368
1369 [FEAT_VMX_BASIC] = {
1370 .type = MSR_FEATURE_WORD,
1371 .feat_names = {
1372 [54] = "vmx-ins-outs",
1373 [55] = "vmx-true-ctls",
1374 },
1375 .msr = {
1376 .index = MSR_IA32_VMX_BASIC,
1377 },
1378 /* Just to be safe - we don't support setting the MSEG version field. */
1379 .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR,
1380 },
1381
1382 [FEAT_VMX_VMFUNC] = {
1383 .type = MSR_FEATURE_WORD,
1384 .feat_names = {
1385 [0] = "vmx-eptp-switching",
1386 },
1387 .msr = {
1388 .index = MSR_IA32_VMX_VMFUNC,
1389 }
1390 },
1391
1392 };
1393
1394 typedef struct FeatureMask {
1395 FeatureWord index;
1396 uint64_t mask;
1397 } FeatureMask;
1398
1399 typedef struct FeatureDep {
1400 FeatureMask from, to;
1401 } FeatureDep;
1402
1403 static FeatureDep feature_dependencies[] = {
1404 {
1405 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES },
1406 .to = { FEAT_ARCH_CAPABILITIES, ~0ull },
1407 },
1408 {
1409 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY },
1410 .to = { FEAT_CORE_CAPABILITY, ~0ull },
1411 },
1412 {
1413 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1414 .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull },
1415 },
1416 {
1417 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1418 .to = { FEAT_VMX_PINBASED_CTLS, ~0ull },
1419 },
1420 {
1421 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1422 .to = { FEAT_VMX_EXIT_CTLS, ~0ull },
1423 },
1424 {
1425 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1426 .to = { FEAT_VMX_ENTRY_CTLS, ~0ull },
1427 },
1428 {
1429 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1430 .to = { FEAT_VMX_MISC, ~0ull },
1431 },
1432 {
1433 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1434 .to = { FEAT_VMX_BASIC, ~0ull },
1435 },
1436 {
1437 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM },
1438 .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE },
1439 },
1440 {
1441 .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS },
1442 .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull },
1443 },
1444 {
1445 .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES },
1446 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES },
1447 },
1448 {
1449 .from = { FEAT_1_ECX, CPUID_EXT_RDRAND },
1450 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING },
1451 },
1452 {
1453 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID },
1454 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID },
1455 },
1456 {
1457 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED },
1458 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING },
1459 },
1460 {
1461 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP },
1462 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP },
1463 },
1464 {
1465 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
1466 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull },
1467 },
1468 {
1469 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
1470 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST },
1471 },
1472 {
1473 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID },
1474 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 },
1475 },
1476 {
1477 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC },
1478 .to = { FEAT_VMX_VMFUNC, ~0ull },
1479 },
1480 };
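
/*
 * Minimal sketch (editor's addition, not part of the original file) showing
 * how a dependency table like the one above is meant to be consumed: when a
 * "from" feature is not fully present, the dependent "to" bits get cleared.
 * The real enforcement happens later during feature expansion; the helper
 * name below is hypothetical.
 */
static inline void example_apply_feature_deps(FeatureWordArray features)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
        FeatureDep *d = &feature_dependencies[i];

        if ((features[d->from.index] & d->from.mask) != d->from.mask) {
            features[d->to.index] &= ~d->to.mask;
        }
    }
}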
1481
1482 typedef struct X86RegisterInfo32 {
1483 /* Name of register */
1484 const char *name;
1485 /* QAPI enum value register */
1486 X86CPURegister32 qapi_enum;
1487 } X86RegisterInfo32;
1488
1489 #define REGISTER(reg) \
1490 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1491 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
1492 REGISTER(EAX),
1493 REGISTER(ECX),
1494 REGISTER(EDX),
1495 REGISTER(EBX),
1496 REGISTER(ESP),
1497 REGISTER(EBP),
1498 REGISTER(ESI),
1499 REGISTER(EDI),
1500 };
1501 #undef REGISTER
1502
1503 typedef struct ExtSaveArea {
1504 uint32_t feature, bits;
1505 uint32_t offset, size;
1506 } ExtSaveArea;
1507
1508 static const ExtSaveArea x86_ext_save_areas[] = {
1509 [XSTATE_FP_BIT] = {
1510 /* x87 FP state component is always enabled if XSAVE is supported */
1511 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1512 /* x87 state is in the legacy region of the XSAVE area */
1513 .offset = 0,
1514 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1515 },
1516 [XSTATE_SSE_BIT] = {
1517 /* SSE state component is always enabled if XSAVE is supported */
1518 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1519 /* SSE state is in the legacy region of the XSAVE area */
1520 .offset = 0,
1521 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1522 },
1523 [XSTATE_YMM_BIT] =
1524 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1525 .offset = offsetof(X86XSaveArea, avx_state),
1526 .size = sizeof(XSaveAVX) },
1527 [XSTATE_BNDREGS_BIT] =
1528 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1529 .offset = offsetof(X86XSaveArea, bndreg_state),
1530 .size = sizeof(XSaveBNDREG) },
1531 [XSTATE_BNDCSR_BIT] =
1532 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1533 .offset = offsetof(X86XSaveArea, bndcsr_state),
1534 .size = sizeof(XSaveBNDCSR) },
1535 [XSTATE_OPMASK_BIT] =
1536 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1537 .offset = offsetof(X86XSaveArea, opmask_state),
1538 .size = sizeof(XSaveOpmask) },
1539 [XSTATE_ZMM_Hi256_BIT] =
1540 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1541 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1542 .size = sizeof(XSaveZMM_Hi256) },
1543 [XSTATE_Hi16_ZMM_BIT] =
1544 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1545 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1546 .size = sizeof(XSaveHi16_ZMM) },
1547 [XSTATE_PKRU_BIT] =
1548 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1549 .offset = offsetof(X86XSaveArea, pkru_state),
1550 .size = sizeof(XSavePKRU) },
1551 };
1552
1553 static uint32_t xsave_area_size(uint64_t mask)
1554 {
1555 int i;
1556 uint64_t ret = 0;
1557
1558 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1559 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1560 if ((mask >> i) & 1) {
1561 ret = MAX(ret, esa->offset + esa->size);
1562 }
1563 }
1564 return ret;
1565 }
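
/*
 * Worked example (editor's addition, not part of the original file): with
 * mask == XSTATE_FP_MASK | XSTATE_SSE_MASK both components sit at offset 0,
 * so the result is just the legacy region plus the XSAVE header.  Adding
 * XSTATE_YMM_MASK grows the area to the end of the AVX component, i.e.
 * offsetof(X86XSaveArea, avx_state) + sizeof(XSaveAVX).  The helper name is
 * hypothetical.
 */
static inline uint32_t example_xsave_area_size(void)
{
    return xsave_area_size(XSTATE_FP_MASK | XSTATE_SSE_MASK |
                           XSTATE_YMM_MASK);
}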
1566
1567 static inline bool accel_uses_host_cpuid(void)
1568 {
1569 return kvm_enabled() || hvf_enabled();
1570 }
1571
1572 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1573 {
1574 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1575 cpu->env.features[FEAT_XSAVE_COMP_LO];
1576 }
1577
1578 const char *get_register_name_32(unsigned int reg)
1579 {
1580 if (reg >= CPU_NB_REGS32) {
1581 return NULL;
1582 }
1583 return x86_reg_info_32[reg].name;
1584 }
1585
1586 /*
1587 * Returns the set of feature flags that are supported and migratable by
1588 * QEMU, for a given FeatureWord.
1589 */
1590 static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
1591 {
1592 FeatureWordInfo *wi = &feature_word_info[w];
1593 uint64_t r = 0;
1594 int i;
1595
1596 for (i = 0; i < 64; i++) {
1597 uint64_t f = 1ULL << i;
1598
1599 /* If the feature name is known, it is implicitly considered migratable,
1600 * unless it is explicitly set in unmigratable_flags */
1601 if ((wi->migratable_flags & f) ||
1602 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1603 r |= f;
1604 }
1605 }
1606 return r;
1607 }
1608
1609 void host_cpuid(uint32_t function, uint32_t count,
1610 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1611 {
1612 uint32_t vec[4];
1613
1614 #ifdef __x86_64__
1615 asm volatile("cpuid"
1616 : "=a"(vec[0]), "=b"(vec[1]),
1617 "=c"(vec[2]), "=d"(vec[3])
1618 : "0"(function), "c"(count) : "cc");
1619 #elif defined(__i386__)
1620 asm volatile("pusha \n\t"
1621 "cpuid \n\t"
1622 "mov %%eax, 0(%2) \n\t"
1623 "mov %%ebx, 4(%2) \n\t"
1624 "mov %%ecx, 8(%2) \n\t"
1625 "mov %%edx, 12(%2) \n\t"
1626 "popa"
1627 : : "a"(function), "c"(count), "S"(vec)
1628 : "memory", "cc");
1629 #else
1630 abort();
1631 #endif
1632
1633 if (eax)
1634 *eax = vec[0];
1635 if (ebx)
1636 *ebx = vec[1];
1637 if (ecx)
1638 *ecx = vec[2];
1639 if (edx)
1640 *edx = vec[3];
1641 }
1642
1643 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1644 {
1645 uint32_t eax, ebx, ecx, edx;
1646
1647 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1648 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1649
1650 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1651 if (family) {
1652 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1653 }
1654 if (model) {
1655 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1656 }
1657 if (stepping) {
1658 *stepping = eax & 0x0F;
1659 }
1660 }
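
/*
 * Worked example (editor's addition, not part of the original file): with a
 * raw CPUID.1 EAX of 0x000906EA, the decoding above yields family
 * 0x6 + 0x0 = 6, model 0xE | 0x90 = 0x9E (158) and stepping 0xA (10),
 * i.e. the extended family/model fields are folded into the base ones.
 * The helper name is hypothetical.
 */
static inline void example_decode_fms(int *family, int *model, int *stepping)
{
    uint32_t eax = 0x000906EA;

    *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);   /* 6   */
    *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12); /* 158 */
    *stepping = eax & 0x0F;                                 /* 10  */
}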
1661
1662 /* CPU class name definitions: */
1663
1664 /* Return the type name for a given CPU model name.
1665 * Caller is responsible for freeing the returned string.
1666 */
1667 static char *x86_cpu_type_name(const char *model_name)
1668 {
1669 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
1670 }
1671
1672 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1673 {
1674 g_autofree char *typename = x86_cpu_type_name(cpu_model);
1675 return object_class_by_name(typename);
1676 }
1677
1678 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1679 {
1680 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1681 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1682 return g_strndup(class_name,
1683 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1684 }
1685
1686 typedef struct PropValue {
1687 const char *prop, *value;
1688 } PropValue;
1689
1690 typedef struct X86CPUVersionDefinition {
1691 X86CPUVersion version;
1692 const char *alias;
1693 PropValue *props;
1694 } X86CPUVersionDefinition;
1695
1696 /* Base definition for a CPU model */
1697 typedef struct X86CPUDefinition {
1698 const char *name;
1699 uint32_t level;
1700 uint32_t xlevel;
1701 /* vendor is zero-terminated, 12 character ASCII string */
1702 char vendor[CPUID_VENDOR_SZ + 1];
1703 int family;
1704 int model;
1705 int stepping;
1706 FeatureWordArray features;
1707 const char *model_id;
1708 CPUCaches *cache_info;
1709 /*
1710 * Definitions for alternative versions of a CPU model.
1711 * List is terminated by item with version == 0.
1712 * If NULL, version 1 will be registered automatically.
1713 */
1714 const X86CPUVersionDefinition *versions;
1715 } X86CPUDefinition;
1716
1717 /* Reference to a specific CPU model version */
1718 struct X86CPUModel {
1719 /* Base CPU definition */
1720 X86CPUDefinition *cpudef;
1721 /* CPU model version */
1722 X86CPUVersion version;
1723 /*
1724 * If true, this is an alias CPU model.
1725 * This matters only for "-cpu help" and query-cpu-definitions
1726 */
1727 bool is_alias;
1728 };
1729
1730 /* Get full model name for CPU version */
1731 static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef,
1732 X86CPUVersion version)
1733 {
1734 assert(version > 0);
1735 return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
1736 }
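/* For example, cpudef->name "Nehalem" with version 2 yields "Nehalem-v2". */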
1737
1738 static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def)
1739 {
1740 /* When X86CPUDefinition::versions is NULL, we register only v1 */
1741 static const X86CPUVersionDefinition default_version_list[] = {
1742 { 1 },
1743 { /* end of list */ }
1744 };
1745
1746 return def->versions ?: default_version_list;
1747 }
1748
1749 static CPUCaches epyc_cache_info = {
1750 .l1d_cache = &(CPUCacheInfo) {
1751 .type = DATA_CACHE,
1752 .level = 1,
1753 .size = 32 * KiB,
1754 .line_size = 64,
1755 .associativity = 8,
1756 .partitions = 1,
1757 .sets = 64,
1758 .lines_per_tag = 1,
1759 .self_init = 1,
1760 .no_invd_sharing = true,
1761 },
1762 .l1i_cache = &(CPUCacheInfo) {
1763 .type = INSTRUCTION_CACHE,
1764 .level = 1,
1765 .size = 64 * KiB,
1766 .line_size = 64,
1767 .associativity = 4,
1768 .partitions = 1,
1769 .sets = 256,
1770 .lines_per_tag = 1,
1771 .self_init = 1,
1772 .no_invd_sharing = true,
1773 },
1774 .l2_cache = &(CPUCacheInfo) {
1775 .type = UNIFIED_CACHE,
1776 .level = 2,
1777 .size = 512 * KiB,
1778 .line_size = 64,
1779 .associativity = 8,
1780 .partitions = 1,
1781 .sets = 1024,
1782 .lines_per_tag = 1,
1783 },
1784 .l3_cache = &(CPUCacheInfo) {
1785 .type = UNIFIED_CACHE,
1786 .level = 3,
1787 .size = 8 * MiB,
1788 .line_size = 64,
1789 .associativity = 16,
1790 .partitions = 1,
1791 .sets = 8192,
1792 .lines_per_tag = 1,
1793 .self_init = true,
1794 .inclusive = true,
1795 .complex_indexing = true,
1796 },
1797 };
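/*
 * Consistency note, derived from the values above: for each level,
 * size == line_size * associativity * partitions * sets, e.g.
 *   L1D: 64 * 8  * 1 * 64   = 32 KiB
 *   L1I: 64 * 4  * 1 * 256  = 64 KiB
 *   L2:  64 * 8  * 1 * 1024 = 512 KiB
 *   L3:  64 * 16 * 1 * 8192 = 8 MiB
 */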
1798
1799 /* The following VMX features are not supported by KVM and are left out in the
1800 * CPU definitions:
1801 *
1802 * Dual-monitor support (all processors)
1803 * Entry to SMM
1804 * Deactivate dual-monitor treatment
1805 * Number of CR3-target values
1806 * Shutdown activity state
1807 * Wait-for-SIPI activity state
1808 * PAUSE-loop exiting (Westmere and newer)
1809 * EPT-violation #VE (Broadwell and newer)
1810 * Inject event with insn length=0 (Skylake and newer)
1811 * Conceal non-root operation from PT
1812 * Conceal VM exits from PT
1813 * Conceal VM entries from PT
1814 * Enable ENCLS exiting
1815 * Mode-based execute control (XS/XU)
1816 * TSC scaling (Skylake Server and newer)
1817 * GPA translation for PT (IceLake and newer)
1818 * User wait and pause
1819 * ENCLV exiting
1820 * Load IA32_RTIT_CTL
1821 * Clear IA32_RTIT_CTL
1822 * Advanced VM-exit information for EPT violations
1823 * Sub-page write permissions
1824 * PT in VMX operation
1825 */
1826
1827 static X86CPUDefinition builtin_x86_defs[] = {
1828 {
1829 .name = "qemu64",
1830 .level = 0xd,
1831 .vendor = CPUID_VENDOR_AMD,
1832 .family = 6,
1833 .model = 6,
1834 .stepping = 3,
1835 .features[FEAT_1_EDX] =
1836 PPRO_FEATURES |
1837 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1838 CPUID_PSE36,
1839 .features[FEAT_1_ECX] =
1840 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1841 .features[FEAT_8000_0001_EDX] =
1842 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1843 .features[FEAT_8000_0001_ECX] =
1844 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1845 .xlevel = 0x8000000A,
1846 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1847 },
1848 {
1849 .name = "phenom",
1850 .level = 5,
1851 .vendor = CPUID_VENDOR_AMD,
1852 .family = 16,
1853 .model = 2,
1854 .stepping = 3,
1855 /* Missing: CPUID_HT */
1856 .features[FEAT_1_EDX] =
1857 PPRO_FEATURES |
1858 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1859 CPUID_PSE36 | CPUID_VME,
1860 .features[FEAT_1_ECX] =
1861 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1862 CPUID_EXT_POPCNT,
1863 .features[FEAT_8000_0001_EDX] =
1864 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1865 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1866 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1867 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1868 CPUID_EXT3_CR8LEG,
1869 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1870 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1871 .features[FEAT_8000_0001_ECX] =
1872 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1873 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1874 /* Missing: CPUID_SVM_LBRV */
1875 .features[FEAT_SVM] =
1876 CPUID_SVM_NPT,
1877 .xlevel = 0x8000001A,
1878 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1879 },
1880 {
1881 .name = "core2duo",
1882 .level = 10,
1883 .vendor = CPUID_VENDOR_INTEL,
1884 .family = 6,
1885 .model = 15,
1886 .stepping = 11,
1887 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1888 .features[FEAT_1_EDX] =
1889 PPRO_FEATURES |
1890 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1891 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1892 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1893 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1894 .features[FEAT_1_ECX] =
1895 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1896 CPUID_EXT_CX16,
1897 .features[FEAT_8000_0001_EDX] =
1898 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1899 .features[FEAT_8000_0001_ECX] =
1900 CPUID_EXT3_LAHF_LM,
1901 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
1902 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1903 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1904 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1905 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1906 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
1907 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1908 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1909 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
1910 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
1911 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
1912 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
1913 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
1914 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
1915 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
1916 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
1917 .features[FEAT_VMX_SECONDARY_CTLS] =
1918 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
1919 .xlevel = 0x80000008,
1920 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1921 },
1922 {
1923 .name = "kvm64",
1924 .level = 0xd,
1925 .vendor = CPUID_VENDOR_INTEL,
1926 .family = 15,
1927 .model = 6,
1928 .stepping = 1,
1929 /* Missing: CPUID_HT */
1930 .features[FEAT_1_EDX] =
1931 PPRO_FEATURES | CPUID_VME |
1932 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1933 CPUID_PSE36,
1934 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1935 .features[FEAT_1_ECX] =
1936 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1937 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1938 .features[FEAT_8000_0001_EDX] =
1939 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1940 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1941 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1942 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1943 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1944 .features[FEAT_8000_0001_ECX] =
1945 0,
1946 /* VMX features from Cedar Mill/Prescott */
1947 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1948 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1949 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1950 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1951 VMX_PIN_BASED_NMI_EXITING,
1952 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1953 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1954 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
1955 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
1956 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
1957 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
1958 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
1959 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING,
1960 .xlevel = 0x80000008,
1961 .model_id = "Common KVM processor"
1962 },
1963 {
1964 .name = "qemu32",
1965 .level = 4,
1966 .vendor = CPUID_VENDOR_INTEL,
1967 .family = 6,
1968 .model = 6,
1969 .stepping = 3,
1970 .features[FEAT_1_EDX] =
1971 PPRO_FEATURES,
1972 .features[FEAT_1_ECX] =
1973 CPUID_EXT_SSE3,
1974 .xlevel = 0x80000004,
1975 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1976 },
1977 {
1978 .name = "kvm32",
1979 .level = 5,
1980 .vendor = CPUID_VENDOR_INTEL,
1981 .family = 15,
1982 .model = 6,
1983 .stepping = 1,
1984 .features[FEAT_1_EDX] =
1985 PPRO_FEATURES | CPUID_VME |
1986 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1987 .features[FEAT_1_ECX] =
1988 CPUID_EXT_SSE3,
1989 .features[FEAT_8000_0001_ECX] =
1990 0,
1991 /* VMX features from Yonah */
1992 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1993 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1994 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1995 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1996 VMX_PIN_BASED_NMI_EXITING,
1997 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1998 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1999 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2000 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2001 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
2002 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
2003 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
2004 .xlevel = 0x80000008,
2005 .model_id = "Common 32-bit KVM processor"
2006 },
2007 {
2008 .name = "coreduo",
2009 .level = 10,
2010 .vendor = CPUID_VENDOR_INTEL,
2011 .family = 6,
2012 .model = 14,
2013 .stepping = 8,
2014 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2015 .features[FEAT_1_EDX] =
2016 PPRO_FEATURES | CPUID_VME |
2017 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
2018 CPUID_SS,
2019 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
2020 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
2021 .features[FEAT_1_ECX] =
2022 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
2023 .features[FEAT_8000_0001_EDX] =
2024 CPUID_EXT2_NX,
2025 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
2026 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
2027 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2028 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2029 VMX_PIN_BASED_NMI_EXITING,
2030 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2031 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2032 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2033 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2034 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
2035 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
2036 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
2037 .xlevel = 0x80000008,
2038 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
2039 },
2040 {
2041 .name = "486",
2042 .level = 1,
2043 .vendor = CPUID_VENDOR_INTEL,
2044 .family = 4,
2045 .model = 8,
2046 .stepping = 0,
2047 .features[FEAT_1_EDX] =
2048 I486_FEATURES,
2049 .xlevel = 0,
2050 .model_id = "",
2051 },
2052 {
2053 .name = "pentium",
2054 .level = 1,
2055 .vendor = CPUID_VENDOR_INTEL,
2056 .family = 5,
2057 .model = 4,
2058 .stepping = 3,
2059 .features[FEAT_1_EDX] =
2060 PENTIUM_FEATURES,
2061 .xlevel = 0,
2062 .model_id = "",
2063 },
2064 {
2065 .name = "pentium2",
2066 .level = 2,
2067 .vendor = CPUID_VENDOR_INTEL,
2068 .family = 6,
2069 .model = 5,
2070 .stepping = 2,
2071 .features[FEAT_1_EDX] =
2072 PENTIUM2_FEATURES,
2073 .xlevel = 0,
2074 .model_id = "",
2075 },
2076 {
2077 .name = "pentium3",
2078 .level = 3,
2079 .vendor = CPUID_VENDOR_INTEL,
2080 .family = 6,
2081 .model = 7,
2082 .stepping = 3,
2083 .features[FEAT_1_EDX] =
2084 PENTIUM3_FEATURES,
2085 .xlevel = 0,
2086 .model_id = "",
2087 },
2088 {
2089 .name = "athlon",
2090 .level = 2,
2091 .vendor = CPUID_VENDOR_AMD,
2092 .family = 6,
2093 .model = 2,
2094 .stepping = 3,
2095 .features[FEAT_1_EDX] =
2096 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
2097 CPUID_MCA,
2098 .features[FEAT_8000_0001_EDX] =
2099 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
2100 .xlevel = 0x80000008,
2101 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
2102 },
2103 {
2104 .name = "n270",
2105 .level = 10,
2106 .vendor = CPUID_VENDOR_INTEL,
2107 .family = 6,
2108 .model = 28,
2109 .stepping = 2,
2110 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2111 .features[FEAT_1_EDX] =
2112 PPRO_FEATURES |
2113 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
2114 CPUID_ACPI | CPUID_SS,
2115 /* Some CPUs have no CPUID_SEP */
2116 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
2117 * CPUID_EXT_XTPR */
2118 .features[FEAT_1_ECX] =
2119 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
2120 CPUID_EXT_MOVBE,
2121 .features[FEAT_8000_0001_EDX] =
2122 CPUID_EXT2_NX,
2123 .features[FEAT_8000_0001_ECX] =
2124 CPUID_EXT3_LAHF_LM,
2125 .xlevel = 0x80000008,
2126 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
2127 },
2128 {
2129 .name = "Conroe",
2130 .level = 10,
2131 .vendor = CPUID_VENDOR_INTEL,
2132 .family = 6,
2133 .model = 15,
2134 .stepping = 3,
2135 .features[FEAT_1_EDX] =
2136 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2137 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2138 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2139 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2140 CPUID_DE | CPUID_FP87,
2141 .features[FEAT_1_ECX] =
2142 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2143 .features[FEAT_8000_0001_EDX] =
2144 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2145 .features[FEAT_8000_0001_ECX] =
2146 CPUID_EXT3_LAHF_LM,
2147 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
2148 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
2149 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
2150 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2151 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2152 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
2153 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2154 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2155 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2156 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2157 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2158 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2159 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2160 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2161 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2162 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2163 .features[FEAT_VMX_SECONDARY_CTLS] =
2164 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
2165 .xlevel = 0x80000008,
2166 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
2167 },
2168 {
2169 .name = "Penryn",
2170 .level = 10,
2171 .vendor = CPUID_VENDOR_INTEL,
2172 .family = 6,
2173 .model = 23,
2174 .stepping = 3,
2175 .features[FEAT_1_EDX] =
2176 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2177 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2178 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2179 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2180 CPUID_DE | CPUID_FP87,
2181 .features[FEAT_1_ECX] =
2182 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2183 CPUID_EXT_SSE3,
2184 .features[FEAT_8000_0001_EDX] =
2185 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2186 .features[FEAT_8000_0001_ECX] =
2187 CPUID_EXT3_LAHF_LM,
2188 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
2189 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2190 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2191 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT |
2192 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
2193 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2194 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2195 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
2196 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2197 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2198 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2199 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2200 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2201 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2202 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2203 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2204 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2205 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2206 .features[FEAT_VMX_SECONDARY_CTLS] =
2207 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2208 VMX_SECONDARY_EXEC_WBINVD_EXITING,
2209 .xlevel = 0x80000008,
2210 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
2211 },
2212 {
2213 .name = "Nehalem",
2214 .level = 11,
2215 .vendor = CPUID_VENDOR_INTEL,
2216 .family = 6,
2217 .model = 26,
2218 .stepping = 3,
2219 .features[FEAT_1_EDX] =
2220 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2221 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2222 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2223 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2224 CPUID_DE | CPUID_FP87,
2225 .features[FEAT_1_ECX] =
2226 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2227 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2228 .features[FEAT_8000_0001_EDX] =
2229 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2230 .features[FEAT_8000_0001_ECX] =
2231 CPUID_EXT3_LAHF_LM,
2232 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2233 MSR_VMX_BASIC_TRUE_CTLS,
2234 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2235 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2236 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2237 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2238 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2239 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2240 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2241 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2242 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2243 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2244 .features[FEAT_VMX_EXIT_CTLS] =
2245 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2246 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2247 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2248 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2249 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2250 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2251 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2252 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2253 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2254 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2255 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2256 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2257 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2258 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2259 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2260 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2261 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2262 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2263 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2264 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2265 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2266 .features[FEAT_VMX_SECONDARY_CTLS] =
2267 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2268 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2269 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2270 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2271 VMX_SECONDARY_EXEC_ENABLE_VPID,
2272 .xlevel = 0x80000008,
2273 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
2274 .versions = (X86CPUVersionDefinition[]) {
2275 { .version = 1 },
2276 {
2277 .version = 2,
2278 .alias = "Nehalem-IBRS",
2279 .props = (PropValue[]) {
2280 { "spec-ctrl", "on" },
2281 { "model-id",
2282 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" },
2283 { /* end of list */ }
2284 }
2285 },
2286 { /* end of list */ }
2287 }
2288 },
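/*
 * Usage sketch (hedged): with versioned definitions like the one above,
 * "-cpu Nehalem" resolves to whichever version the machine type selects,
 * "-cpu Nehalem-v2" requests that version explicitly, and the alias
 * "-cpu Nehalem-IBRS" is expected to behave like Nehalem-v2.
 */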
2289 {
2290 .name = "Westmere",
2291 .level = 11,
2292 .vendor = CPUID_VENDOR_INTEL,
2293 .family = 6,
2294 .model = 44,
2295 .stepping = 1,
2296 .features[FEAT_1_EDX] =
2297 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2298 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2299 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2300 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2301 CPUID_DE | CPUID_FP87,
2302 .features[FEAT_1_ECX] =
2303 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2304 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2305 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2306 .features[FEAT_8000_0001_EDX] =
2307 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2308 .features[FEAT_8000_0001_ECX] =
2309 CPUID_EXT3_LAHF_LM,
2310 .features[FEAT_6_EAX] =
2311 CPUID_6_EAX_ARAT,
2312 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2313 MSR_VMX_BASIC_TRUE_CTLS,
2314 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2315 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2316 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2317 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2318 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2319 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2320 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2321 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2322 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2323 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2324 .features[FEAT_VMX_EXIT_CTLS] =
2325 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2326 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2327 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2328 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2329 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2330 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2331 MSR_VMX_MISC_STORE_LMA,
2332 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2333 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2334 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2335 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2336 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2337 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2338 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2339 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2340 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2341 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2342 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2343 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2344 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2345 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2346 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2347 .features[FEAT_VMX_SECONDARY_CTLS] =
2348 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2349 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2350 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2351 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2352 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
2353 .xlevel = 0x80000008,
2354 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
2355 .versions = (X86CPUVersionDefinition[]) {
2356 { .version = 1 },
2357 {
2358 .version = 2,
2359 .alias = "Westmere-IBRS",
2360 .props = (PropValue[]) {
2361 { "spec-ctrl", "on" },
2362 { "model-id",
2363 "Westmere E56xx/L56xx/X56xx (IBRS update)" },
2364 { /* end of list */ }
2365 }
2366 },
2367 { /* end of list */ }
2368 }
2369 },
2370 {
2371 .name = "SandyBridge",
2372 .level = 0xd,
2373 .vendor = CPUID_VENDOR_INTEL,
2374 .family = 6,
2375 .model = 42,
2376 .stepping = 1,
2377 .features[FEAT_1_EDX] =
2378 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2379 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2380 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2381 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2382 CPUID_DE | CPUID_FP87,
2383 .features[FEAT_1_ECX] =
2384 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2385 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2386 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2387 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2388 CPUID_EXT_SSE3,
2389 .features[FEAT_8000_0001_EDX] =
2390 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2391 CPUID_EXT2_SYSCALL,
2392 .features[FEAT_8000_0001_ECX] =
2393 CPUID_EXT3_LAHF_LM,
2394 .features[FEAT_XSAVE] =
2395 CPUID_XSAVE_XSAVEOPT,
2396 .features[FEAT_6_EAX] =
2397 CPUID_6_EAX_ARAT,
2398 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2399 MSR_VMX_BASIC_TRUE_CTLS,
2400 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2401 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2402 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2403 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2404 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2405 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2406 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2407 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2408 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2409 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2410 .features[FEAT_VMX_EXIT_CTLS] =
2411 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2412 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2413 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2414 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2415 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2416 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2417 MSR_VMX_MISC_STORE_LMA,
2418 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2419 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2420 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2421 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2422 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2423 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2424 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2425 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2426 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2427 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2428 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2429 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2430 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2431 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2432 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2433 .features[FEAT_VMX_SECONDARY_CTLS] =
2434 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2435 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2436 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2437 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2438 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
2439 .xlevel = 0x80000008,
2440 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
2441 .versions = (X86CPUVersionDefinition[]) {
2442 { .version = 1 },
2443 {
2444 .version = 2,
2445 .alias = "SandyBridge-IBRS",
2446 .props = (PropValue[]) {
2447 { "spec-ctrl", "on" },
2448 { "model-id",
2449 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" },
2450 { /* end of list */ }
2451 }
2452 },
2453 { /* end of list */ }
2454 }
2455 },
2456 {
2457 .name = "IvyBridge",
2458 .level = 0xd,
2459 .vendor = CPUID_VENDOR_INTEL,
2460 .family = 6,
2461 .model = 58,
2462 .stepping = 9,
2463 .features[FEAT_1_EDX] =
2464 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2465 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2466 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2467 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2468 CPUID_DE | CPUID_FP87,
2469 .features[FEAT_1_ECX] =
2470 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2471 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2472 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2473 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2474 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2475 .features[FEAT_7_0_EBX] =
2476 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
2477 CPUID_7_0_EBX_ERMS,
2478 .features[FEAT_8000_0001_EDX] =
2479 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2480 CPUID_EXT2_SYSCALL,
2481 .features[FEAT_8000_0001_ECX] =
2482 CPUID_EXT3_LAHF_LM,
2483 .features[FEAT_XSAVE] =
2484 CPUID_XSAVE_XSAVEOPT,
2485 .features[FEAT_6_EAX] =
2486 CPUID_6_EAX_ARAT,
2487 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2488 MSR_VMX_BASIC_TRUE_CTLS,
2489 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2490 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2491 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2492 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2493 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2494 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2495 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2496 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2497 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2498 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2499 .features[FEAT_VMX_EXIT_CTLS] =
2500 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2501 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2502 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2503 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2504 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2505 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2506 MSR_VMX_MISC_STORE_LMA,
2507 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2508 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2509 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2510 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2511 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2512 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2513 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2514 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2515 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2516 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2517 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2518 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2519 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2520 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2521 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2522 .features[FEAT_VMX_SECONDARY_CTLS] =
2523 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2524 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2525 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2526 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2527 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2528 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2529 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2530 VMX_SECONDARY_EXEC_RDRAND_EXITING,
2531 .xlevel = 0x80000008,
2532 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
2533 .versions = (X86CPUVersionDefinition[]) {
2534 { .version = 1 },
2535 {
2536 .version = 2,
2537 .alias = "IvyBridge-IBRS",
2538 .props = (PropValue[]) {
2539 { "spec-ctrl", "on" },
2540 { "model-id",
2541 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" },
2542 { /* end of list */ }
2543 }
2544 },
2545 { /* end of list */ }
2546 }
2547 },
2548 {
2549 .name = "Haswell",
2550 .level = 0xd,
2551 .vendor = CPUID_VENDOR_INTEL,
2552 .family = 6,
2553 .model = 60,
2554 .stepping = 4,
2555 .features[FEAT_1_EDX] =
2556 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2557 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2558 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2559 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2560 CPUID_DE | CPUID_FP87,
2561 .features[FEAT_1_ECX] =
2562 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2563 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2564 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2565 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2566 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2567 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2568 .features[FEAT_8000_0001_EDX] =
2569 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2570 CPUID_EXT2_SYSCALL,
2571 .features[FEAT_8000_0001_ECX] =
2572 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2573 .features[FEAT_7_0_EBX] =
2574 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2575 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2576 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2577 CPUID_7_0_EBX_RTM,
2578 .features[FEAT_XSAVE] =
2579 CPUID_XSAVE_XSAVEOPT,
2580 .features[FEAT_6_EAX] =
2581 CPUID_6_EAX_ARAT,
2582 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2583 MSR_VMX_BASIC_TRUE_CTLS,
2584 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2585 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2586 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2587 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2588 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2589 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2590 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2591 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2592 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2593 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2594 .features[FEAT_VMX_EXIT_CTLS] =
2595 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2596 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2597 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2598 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2599 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2600 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2601 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2602 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2603 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2604 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2605 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2606 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2607 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2608 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2609 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2610 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2611 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2612 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2613 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2614 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2615 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2616 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2617 .features[FEAT_VMX_SECONDARY_CTLS] =
2618 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2619 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2620 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2621 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2622 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2623 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2624 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2625 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2626 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
2627 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2628 .xlevel = 0x80000008,
2629 .model_id = "Intel Core Processor (Haswell)",
2630 .versions = (X86CPUVersionDefinition[]) {
2631 { .version = 1 },
2632 {
2633 .version = 2,
2634 .alias = "Haswell-noTSX",
2635 .props = (PropValue[]) {
2636 { "hle", "off" },
2637 { "rtm", "off" },
2638 { "stepping", "1" },
2639 { "model-id", "Intel Core Processor (Haswell, no TSX)", },
2640 { /* end of list */ }
2641 },
2642 },
2643 {
2644 .version = 3,
2645 .alias = "Haswell-IBRS",
2646 .props = (PropValue[]) {
2647 /* Restore TSX features removed by -v2 above */
2648 { "hle", "on" },
2649 { "rtm", "on" },
2650 /*
2651 * Haswell and Haswell-IBRS had stepping=4 in
2652 * QEMU 4.0 and older
2653 */
2654 { "stepping", "4" },
2655 { "spec-ctrl", "on" },
2656 { "model-id",
2657 "Intel Core Processor (Haswell, IBRS)" },
2658 { /* end of list */ }
2659 }
2660 },
2661 {
2662 .version = 4,
2663 .alias = "Haswell-noTSX-IBRS",
2664 .props = (PropValue[]) {
2665 { "hle", "off" },
2666 { "rtm", "off" },
2667 /* spec-ctrl was already enabled by -v3 above */
2668 { "stepping", "1" },
2669 { "model-id",
2670 "Intel Core Processor (Haswell, no TSX, IBRS)" },
2671 { /* end of list */ }
2672 }
2673 },
2674 { /* end of list */ }
2675 }
2676 },
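/*
 * Note on version property lists (hedged reading of the entries above):
 * properties appear to be applied cumulatively from v1 up to the requested
 * version, which is why -v3 re-enables the TSX flags that -v2 turned off,
 * and -v4 only needs to turn them off again while inheriting spec-ctrl
 * from -v3.
 */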
2677 {
2678 .name = "Broadwell",
2679 .level = 0xd,
2680 .vendor = CPUID_VENDOR_INTEL,
2681 .family = 6,
2682 .model = 61,
2683 .stepping = 2,
2684 .features[FEAT_1_EDX] =
2685 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2686 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2687 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2688 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2689 CPUID_DE | CPUID_FP87,
2690 .features[FEAT_1_ECX] =
2691 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2692 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2693 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2694 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2695 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2696 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2697 .features[FEAT_8000_0001_EDX] =
2698 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2699 CPUID_EXT2_SYSCALL,
2700 .features[FEAT_8000_0001_ECX] =
2701 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2702 .features[FEAT_7_0_EBX] =
2703 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2704 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2705 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2706 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2707 CPUID_7_0_EBX_SMAP,
2708 .features[FEAT_XSAVE] =
2709 CPUID_XSAVE_XSAVEOPT,
2710 .features[FEAT_6_EAX] =
2711 CPUID_6_EAX_ARAT,
2712 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2713 MSR_VMX_BASIC_TRUE_CTLS,
2714 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2715 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2716 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2717 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2718 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2719 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2720 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2721 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2722 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2723 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2724 .features[FEAT_VMX_EXIT_CTLS] =
2725 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2726 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2727 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2728 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2729 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2730 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2731 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2732 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2733 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2734 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2735 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2736 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2737 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2738 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2739 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2740 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2741 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2742 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2743 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2744 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2745 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2746 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2747 .features[FEAT_VMX_SECONDARY_CTLS] =
2748 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2749 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2750 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2751 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2752 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2753 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2754 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2755 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2756 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
2757 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
2758 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2759 .xlevel = 0x80000008,
2760 .model_id = "Intel Core Processor (Broadwell)",
2761 .versions = (X86CPUVersionDefinition[]) {
2762 { .version = 1 },
2763 {
2764 .version = 2,
2765 .alias = "Broadwell-noTSX",
2766 .props = (PropValue[]) {
2767 { "hle", "off" },
2768 { "rtm", "off" },
2769 { "model-id", "Intel Core Processor (Broadwell, no TSX)", },
2770 { /* end of list */ }
2771 },
2772 },
2773 {
2774 .version = 3,
2775 .alias = "Broadwell-IBRS",
2776 .props = (PropValue[]) {
2777 /* Restore TSX features removed by -v2 above */
2778 { "hle", "on" },
2779 { "rtm", "on" },
2780 { "spec-ctrl", "on" },
2781 { "model-id",
2782 "Intel Core Processor (Broadwell, IBRS)" },
2783 { /* end of list */ }
2784 }
2785 },
2786 {
2787 .version = 4,
2788 .alias = "Broadwell-noTSX-IBRS",
2789 .props = (PropValue[]) {
2790 { "hle", "off" },
2791 { "rtm", "off" },
2792 /* spec-ctrl was already enabled by -v3 above */
2793 { "model-id",
2794 "Intel Core Processor (Broadwell, no TSX, IBRS)" },
2795 { /* end of list */ }
2796 }
2797 },
2798 { /* end of list */ }
2799 }
2800 },
2801 {
2802 .name = "Skylake-Client",
2803 .level = 0xd,
2804 .vendor = CPUID_VENDOR_INTEL,
2805 .family = 6,
2806 .model = 94,
2807 .stepping = 3,
2808 .features[FEAT_1_EDX] =
2809 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2810 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2811 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2812 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2813 CPUID_DE | CPUID_FP87,
2814 .features[FEAT_1_ECX] =
2815 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2816 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2817 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2818 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2819 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2820 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2821 .features[FEAT_8000_0001_EDX] =
2822 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2823 CPUID_EXT2_SYSCALL,
2824 .features[FEAT_8000_0001_ECX] =
2825 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2826 .features[FEAT_7_0_EBX] =
2827 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2828 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2829 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2830 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2831 CPUID_7_0_EBX_SMAP,
2832 /* Missing: XSAVES (not supported by some Linux versions,
2833 * including v4.1 to v4.12).
2834 * KVM doesn't yet expose any XSAVES state save component,
2835 * and the only one defined in Skylake (processor tracing)
2836 * probably will block migration anyway.
2837 */
2838 .features[FEAT_XSAVE] =
2839 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2840 CPUID_XSAVE_XGETBV1,
2841 .features[FEAT_6_EAX] =
2842 CPUID_6_EAX_ARAT,
2843 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
2844 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2845 MSR_VMX_BASIC_TRUE_CTLS,
2846 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2847 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2848 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2849 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2850 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2851 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2852 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2853 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2854 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2855 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2856 .features[FEAT_VMX_EXIT_CTLS] =
2857 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2858 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2859 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2860 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2861 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2862 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2863 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2864 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2865 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2866 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2867 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2868 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2869 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2870 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2871 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2872 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2873 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2874 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2875 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2876 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2877 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2878 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2879 .features[FEAT_VMX_SECONDARY_CTLS] =
2880 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2881 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2882 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2883 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2884 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2885 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
2886 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
2887 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2888 .xlevel = 0x80000008,
2889 .model_id = "Intel Core Processor (Skylake)",
2890 .versions = (X86CPUVersionDefinition[]) {
2891 { .version = 1 },
2892 {
2893 .version = 2,
2894 .alias = "Skylake-Client-IBRS",
2895 .props = (PropValue[]) {
2896 { "spec-ctrl", "on" },
2897 { "model-id",
2898 "Intel Core Processor (Skylake, IBRS)" },
2899 { /* end of list */ }
2900 }
2901 },
2902 {
2903 .version = 3,
2904 .alias = "Skylake-Client-noTSX-IBRS",
2905 .props = (PropValue[]) {
2906 { "hle", "off" },
2907 { "rtm", "off" },
2908 { /* end of list */ }
2909 }
2910 },
2911 { /* end of list */ }
2912 }
2913 },
2914 {
2915 .name = "Skylake-Server",
2916 .level = 0xd,
2917 .vendor = CPUID_VENDOR_INTEL,
2918 .family = 6,
2919 .model = 85,
2920 .stepping = 4,
2921 .features[FEAT_1_EDX] =
2922 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2923 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2924 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2925 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2926 CPUID_DE | CPUID_FP87,
2927 .features[FEAT_1_ECX] =
2928 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2929 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2930 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2931 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2932 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2933 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2934 .features[FEAT_8000_0001_EDX] =
2935 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2936 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2937 .features[FEAT_8000_0001_ECX] =
2938 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2939 .features[FEAT_7_0_EBX] =
2940 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2941 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2942 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2943 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2944 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2945 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2946 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2947 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2948 .features[FEAT_7_0_ECX] =
2949 CPUID_7_0_ECX_PKU,
2950 /* Missing: XSAVES (not supported by some Linux versions,
2951 * including v4.1 to v4.12).
2952 * KVM doesn't yet expose any XSAVES state save component,
2953 * and the only one defined in Skylake (processor tracing)
2954 * probably will block migration anyway.
2955 */
2956 .features[FEAT_XSAVE] =
2957 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2958 CPUID_XSAVE_XGETBV1,
2959 .features[FEAT_6_EAX] =
2960 CPUID_6_EAX_ARAT,
2961 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
2962 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2963 MSR_VMX_BASIC_TRUE_CTLS,
2964 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2965 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2966 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2967 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2968 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2969 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2970 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2971 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2972 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2973 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2974 .features[FEAT_VMX_EXIT_CTLS] =
2975 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2976 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2977 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2978 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2979 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2980 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2981 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2982 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2983 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2984 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2985 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2986 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2987 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2988 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2989 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2990 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2991 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2992 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2993 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2994 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2995 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2996 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2997 .features[FEAT_VMX_SECONDARY_CTLS] =
2998 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2999 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3000 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3001 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3002 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3003 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3004 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3005 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3006 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3007 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3008 .xlevel = 0x80000008,
3009 .model_id = "Intel Xeon Processor (Skylake)",
3010 .versions = (X86CPUVersionDefinition[]) {
3011 { .version = 1 },
3012 {
3013 .version = 2,
3014 .alias = "Skylake-Server-IBRS",
3015 .props = (PropValue[]) {
3016 /* clflushopt was not added to Skylake-Server-IBRS */
3017 /* TODO: add -v3 including clflushopt */
3018 { "clflushopt", "off" },
3019 { "spec-ctrl", "on" },
3020 { "model-id",
3021 "Intel Xeon Processor (Skylake, IBRS)" },
3022 { /* end of list */ }
3023 }
3024 },
3025 {
3026 .version = 3,
3027 .alias = "Skylake-Server-noTSX-IBRS",
3028 .props = (PropValue[]) {
3029 { "hle", "off" },
3030 { "rtm", "off" },
3031 { /* end of list */ }
3032 }
3033 },
3034 { /* end of list */ }
3035 }
3036 },
3037 {
3038 .name = "Cascadelake-Server",
3039 .level = 0xd,
3040 .vendor = CPUID_VENDOR_INTEL,
3041 .family = 6,
3042 .model = 85,
3043 .stepping = 6,
3044 .features[FEAT_1_EDX] =
3045 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3046 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3047 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3048 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3049 CPUID_DE | CPUID_FP87,
3050 .features[FEAT_1_ECX] =
3051 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3052 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3053 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3054 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3055 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3056 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3057 .features[FEAT_8000_0001_EDX] =
3058 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3059 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3060 .features[FEAT_8000_0001_ECX] =
3061 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3062 .features[FEAT_7_0_EBX] =
3063 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3064 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3065 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3066 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3067 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3068 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3069 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3070 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3071 .features[FEAT_7_0_ECX] =
3072 CPUID_7_0_ECX_PKU |
3073 CPUID_7_0_ECX_AVX512VNNI,
3074 .features[FEAT_7_0_EDX] =
3075 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3076 /* Missing: XSAVES (not supported by some Linux versions,
3077 * including v4.1 to v4.12).
3078 * KVM doesn't yet expose any XSAVES state save component,
3079 * and the only one defined in Skylake (processor tracing)
3080 * probably will block migration anyway.
3081 */
3082 .features[FEAT_XSAVE] =
3083 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3084 CPUID_XSAVE_XGETBV1,
3085 .features[FEAT_6_EAX] =
3086 CPUID_6_EAX_ARAT,
3087 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3088 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3089 MSR_VMX_BASIC_TRUE_CTLS,
3090 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3091 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3092 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3093 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3094 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3095 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3096 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3097 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3098 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3099 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3100 .features[FEAT_VMX_EXIT_CTLS] =
3101 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3102 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3103 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3104 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3105 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3106 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3107 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3108 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3109 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3110 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3111 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3112 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3113 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3114 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3115 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3116 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3117 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3118 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3119 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3120 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3121 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3122 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3123 .features[FEAT_VMX_SECONDARY_CTLS] =
3124 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3125 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3126 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3127 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3128 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3129 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3130 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3131 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3132 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3133 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3134 .xlevel = 0x80000008,
3135 .model_id = "Intel Xeon Processor (Cascadelake)",
3136 .versions = (X86CPUVersionDefinition[]) {
3137 { .version = 1 },
3138 { .version = 2,
3139 .props = (PropValue[]) {
3140 { "arch-capabilities", "on" },
3141 { "rdctl-no", "on" },
3142 { "ibrs-all", "on" },
3143 { "skip-l1dfl-vmentry", "on" },
3144 { "mds-no", "on" },
3145 { /* end of list */ }
3146 },
3147 },
3148 { .version = 3,
3149 .alias = "Cascadelake-Server-noTSX",
3150 .props = (PropValue[]) {
3151 { "hle", "off" },
3152 { "rtm", "off" },
3153 { /* end of list */ }
3154 },
3155 },
3156 { /* end of list */ }
3157 }
3158 },
3159 {
3160 .name = "Cooperlake",
3161 .level = 0xd,
3162 .vendor = CPUID_VENDOR_INTEL,
3163 .family = 6,
3164 .model = 85,
3165 .stepping = 10,
3166 .features[FEAT_1_EDX] =
3167 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3168 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3169 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3170 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3171 CPUID_DE | CPUID_FP87,
3172 .features[FEAT_1_ECX] =
3173 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3174 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3175 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3176 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3177 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3178 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3179 .features[FEAT_8000_0001_EDX] =
3180 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3181 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3182 .features[FEAT_8000_0001_ECX] =
3183 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3184 .features[FEAT_7_0_EBX] =
3185 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3186 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3187 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3188 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3189 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3190 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3191 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3192 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3193 .features[FEAT_7_0_ECX] =
3194 CPUID_7_0_ECX_PKU |
3195 CPUID_7_0_ECX_AVX512VNNI,
3196 .features[FEAT_7_0_EDX] =
3197 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP |
3198 CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES,
3199 .features[FEAT_ARCH_CAPABILITIES] =
3200 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL |
3201 MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO,
3202 .features[FEAT_7_1_EAX] =
3203 CPUID_7_1_EAX_AVX512_BF16,
3204 /*
3205 * Missing: XSAVES (not supported by some Linux versions,
3206 * including v4.1 to v4.12).
3207 * KVM doesn't yet expose any XSAVES state save component,
3208 * and the only one defined in Skylake (processor tracing)
3209 * probably will block migration anyway.
3210 */
3211 .features[FEAT_XSAVE] =
3212 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3213 CPUID_XSAVE_XGETBV1,
3214 .features[FEAT_6_EAX] =
3215 CPUID_6_EAX_ARAT,
3216 .xlevel = 0x80000008,
3217 .model_id = "Intel Xeon Processor (Cooperlake)",
3218 },
3219 {
3220 .name = "Icelake-Client",
3221 .level = 0xd,
3222 .vendor = CPUID_VENDOR_INTEL,
3223 .family = 6,
3224 .model = 126,
3225 .stepping = 0,
3226 .features[FEAT_1_EDX] =
3227 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3228 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3229 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3230 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3231 CPUID_DE | CPUID_FP87,
3232 .features[FEAT_1_ECX] =
3233 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3234 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3235 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3236 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3237 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3238 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3239 .features[FEAT_8000_0001_EDX] =
3240 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
3241 CPUID_EXT2_SYSCALL,
3242 .features[FEAT_8000_0001_ECX] =
3243 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3244 .features[FEAT_8000_0008_EBX] =
3245 CPUID_8000_0008_EBX_WBNOINVD,
3246 .features[FEAT_7_0_EBX] =
3247 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3248 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3249 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3250 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3251 CPUID_7_0_EBX_SMAP,
3252 .features[FEAT_7_0_ECX] =
3253 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
3254 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
3255 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
3256 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
3257 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
3258 .features[FEAT_7_0_EDX] =
3259 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3260 /* Missing: XSAVES (not supported by some Linux versions,
3261 * including v4.1 to v4.12).
3262 * KVM doesn't yet expose any XSAVES state save component,
3263 * and the only one defined in Skylake (processor tracing)
3264 * probably will block migration anyway.
3265 */
3266 .features[FEAT_XSAVE] =
3267 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3268 CPUID_XSAVE_XGETBV1,
3269 .features[FEAT_6_EAX] =
3270 CPUID_6_EAX_ARAT,
3271 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3272 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3273 MSR_VMX_BASIC_TRUE_CTLS,
3274 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3275 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3276 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3277 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3278 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3279 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3280 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3281 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3282 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3283 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3284 .features[FEAT_VMX_EXIT_CTLS] =
3285 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3286 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3287 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3288 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3289 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3290 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3291 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3292 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3293 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3294 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
3295 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3296 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3297 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3298 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3299 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3300 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3301 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3302 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3303 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3304 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3305 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3306 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3307 .features[FEAT_VMX_SECONDARY_CTLS] =
3308 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3309 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3310 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3311 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3312 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3313 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3314 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3315 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3316 .xlevel = 0x80000008,
3317 .model_id = "Intel Core Processor (Icelake)",
3318 .versions = (X86CPUVersionDefinition[]) {
3319 { .version = 1 },
3320 {
3321 .version = 2,
3322 .alias = "Icelake-Client-noTSX",
3323 .props = (PropValue[]) {
3324 { "hle", "off" },
3325 { "rtm", "off" },
3326 { /* end of list */ }
3327 },
3328 },
3329 { /* end of list */ }
3330 }
3331 },
3332 {
3333 .name = "Icelake-Server",
3334 .level = 0xd,
3335 .vendor = CPUID_VENDOR_INTEL,
3336 .family = 6,
3337 .model = 134,
3338 .stepping = 0,
3339 .features[FEAT_1_EDX] =
3340 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3341 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3342 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3343 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3344 CPUID_DE | CPUID_FP87,
3345 .features[FEAT_1_ECX] =
3346 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3347 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3348 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3349 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3350 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3351 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3352 .features[FEAT_8000_0001_EDX] =
3353 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3354 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3355 .features[FEAT_8000_0001_ECX] =
3356 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3357 .features[FEAT_8000_0008_EBX] =
3358 CPUID_8000_0008_EBX_WBNOINVD,
3359 .features[FEAT_7_0_EBX] =
3360 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3361 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3362 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3363 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3364 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3365 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3366 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3367 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3368 .features[FEAT_7_0_ECX] =
3369 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
3370 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
3371 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
3372 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
3373 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
3374 .features[FEAT_7_0_EDX] =
3375 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3376 /* Missing: XSAVES (not supported by some Linux versions,
3377 * including v4.1 to v4.12).
3378 * KVM doesn't yet expose any XSAVES state save component,
3379 * and the only one defined in Skylake (processor tracing)
3380 * probably will block migration anyway.
3381 */
3382 .features[FEAT_XSAVE] =
3383 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3384 CPUID_XSAVE_XGETBV1,
3385 .features[FEAT_6_EAX] =
3386 CPUID_6_EAX_ARAT,
3387 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3388 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3389 MSR_VMX_BASIC_TRUE_CTLS,
3390 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3391 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3392 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3393 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3394 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3395 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3396 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3397 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3398 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3399 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3400 .features[FEAT_VMX_EXIT_CTLS] =
3401 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3402 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3403 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3404 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3405 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3406 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3407 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3408 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3409 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3410 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3411 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3412 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3413 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3414 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3415 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3416 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3417 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3418 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3419 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3420 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3421 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3422 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3423 .features[FEAT_VMX_SECONDARY_CTLS] =
3424 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3425 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3426 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3427 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3428 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3429 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3430 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3431 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3432 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
3433 .xlevel = 0x80000008,
3434 .model_id = "Intel Xeon Processor (Icelake)",
3435 .versions = (X86CPUVersionDefinition[]) {
3436 { .version = 1 },
3437 {
3438 .version = 2,
3439 .alias = "Icelake-Server-noTSX",
3440 .props = (PropValue[]) {
3441 { "hle", "off" },
3442 { "rtm", "off" },
3443 { /* end of list */ }
3444 },
3445 },
3446 { /* end of list */ }
3447 }
3448 },
3449 {
3450 .name = "Denverton",
3451 .level = 21,
3452 .vendor = CPUID_VENDOR_INTEL,
3453 .family = 6,
3454 .model = 95,
3455 .stepping = 1,
3456 .features[FEAT_1_EDX] =
3457 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
3458 CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
3459 CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
3460 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR |
3461 CPUID_SSE | CPUID_SSE2,
3462 .features[FEAT_1_ECX] =
3463 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
3464 CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 |
3465 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
3466 CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER |
3467 CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND,
3468 .features[FEAT_8000_0001_EDX] =
3469 CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB |
3470 CPUID_EXT2_RDTSCP | CPUID_EXT2_LM,
3471 .features[FEAT_8000_0001_ECX] =
3472 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3473 .features[FEAT_7_0_EBX] =
3474 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS |
3475 CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP |
3476 CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI,
3477 .features[FEAT_7_0_EDX] =
3478 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES |
3479 CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3480 /*
3481 * Missing: XSAVES (not supported by some Linux versions,
3482 * including v4.1 to v4.12).
3483 * KVM doesn't yet expose any XSAVES state save component,
3484 * and the only one defined in Skylake (processor tracing)
3485 * probably will block migration anyway.
3486 */
3487 .features[FEAT_XSAVE] =
3488 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1,
3489 .features[FEAT_6_EAX] =
3490 CPUID_6_EAX_ARAT,
3491 .features[FEAT_ARCH_CAPABILITIES] =
3492 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY,
3493 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3494 MSR_VMX_BASIC_TRUE_CTLS,
3495 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3496 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3497 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3498 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3499 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3500 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3501 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3502 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3503 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3504 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3505 .features[FEAT_VMX_EXIT_CTLS] =
3506 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3507 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3508 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3509 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3510 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3511 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3512 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3513 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3514 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3515 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3516 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3517 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3518 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3519 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3520 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3521 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3522 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3523 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3524 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3525 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3526 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3527 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3528 .features[FEAT_VMX_SECONDARY_CTLS] =
3529 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3530 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3531 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3532 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3533 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3534 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3535 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3536 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3537 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3538 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3539 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3540 .xlevel = 0x80000008,
3541 .model_id = "Intel Atom Processor (Denverton)",
3542 },
3543 {
3544 .name = "Snowridge",
3545 .level = 27,
3546 .vendor = CPUID_VENDOR_INTEL,
3547 .family = 6,
3548 .model = 134,
3549 .stepping = 1,
3550 .features[FEAT_1_EDX] =
3551 /* missing: CPUID_PN CPUID_IA64 */
3552 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
3553 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE |
3554 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE |
3555 CPUID_CX8 | CPUID_APIC | CPUID_SEP |
3556 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
3557 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH |
3558 CPUID_MMX |
3559 CPUID_FXSR | CPUID_SSE | CPUID_SSE2,
3560 .features[FEAT_1_ECX] =
3561 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
3562 CPUID_EXT_SSSE3 |
3563 CPUID_EXT_CX16 |
3564 CPUID_EXT_SSE41 |
3565 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
3566 CPUID_EXT_POPCNT |
3567 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE |
3568 CPUID_EXT_RDRAND,
3569 .features[FEAT_8000_0001_EDX] =
3570 CPUID_EXT2_SYSCALL |
3571 CPUID_EXT2_NX |
3572 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3573 CPUID_EXT2_LM,
3574 .features[FEAT_8000_0001_ECX] =
3575 CPUID_EXT3_LAHF_LM |
3576 CPUID_EXT3_3DNOWPREFETCH,
3577 .features[FEAT_7_0_EBX] =
3578 CPUID_7_0_EBX_FSGSBASE |
3579 CPUID_7_0_EBX_SMEP |
3580 CPUID_7_0_EBX_ERMS |
3581 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */
3582 CPUID_7_0_EBX_RDSEED |
3583 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3584 CPUID_7_0_EBX_CLWB |
3585 CPUID_7_0_EBX_SHA_NI,
3586 .features[FEAT_7_0_ECX] =
3587 CPUID_7_0_ECX_UMIP |
3588 /* missing bit 5 */
3589 CPUID_7_0_ECX_GFNI |
3590 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE |
3591 CPUID_7_0_ECX_MOVDIR64B,
3592 .features[FEAT_7_0_EDX] =
3593 CPUID_7_0_EDX_SPEC_CTRL |
3594 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD |
3595 CPUID_7_0_EDX_CORE_CAPABILITY,
3596 .features[FEAT_CORE_CAPABILITY] =
3597 MSR_CORE_CAP_SPLIT_LOCK_DETECT,
3598 /*
3599 * Missing: XSAVES (not supported by some Linux versions,
3600 * including v4.1 to v4.12).
3601 * KVM doesn't yet expose any XSAVES state save component,
3602 * and the only one defined in Skylake (processor tracing)
3603 * probably will block migration anyway.
3604 */
3605 .features[FEAT_XSAVE] =
3606 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3607 CPUID_XSAVE_XGETBV1,
3608 .features[FEAT_6_EAX] =
3609 CPUID_6_EAX_ARAT,
3610 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3611 MSR_VMX_BASIC_TRUE_CTLS,
3612 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3613 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3614 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3615 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3616 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3617 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3618 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3619 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3620 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3621 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3622 .features[FEAT_VMX_EXIT_CTLS] =
3623 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3624 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3625 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3626 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3627 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3628 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3629 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3630 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3631 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3632 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3633 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3634 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3635 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3636 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3637 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3638 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3639 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3640 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3641 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3642 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3643 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3644 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3645 .features[FEAT_VMX_SECONDARY_CTLS] =
3646 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3647 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3648 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3649 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3650 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3651 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3652 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3653 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3654 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3655 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3656 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3657 .xlevel = 0x80000008,
3658 .model_id = "Intel Atom Processor (SnowRidge)",
3659 .versions = (X86CPUVersionDefinition[]) {
3660 { .version = 1 },
3661 {
3662 .version = 2,
3663 .props = (PropValue[]) {
3664 { "mpx", "off" },
3665 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" },
3666 { /* end of list */ },
3667 },
3668 },
3669 { /* end of list */ },
3670 },
3671 },
3672 {
3673 .name = "KnightsMill",
3674 .level = 0xd,
3675 .vendor = CPUID_VENDOR_INTEL,
3676 .family = 6,
3677 .model = 133,
3678 .stepping = 0,
3679 .features[FEAT_1_EDX] =
3680 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
3681 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
3682 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
3683 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
3684 CPUID_PSE | CPUID_DE | CPUID_FP87,
3685 .features[FEAT_1_ECX] =
3686 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3687 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3688 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3689 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3690 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3691 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3692 .features[FEAT_8000_0001_EDX] =
3693 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3694 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3695 .features[FEAT_8000_0001_ECX] =
3696 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3697 .features[FEAT_7_0_EBX] =
3698 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3699 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
3700 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
3701 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
3702 CPUID_7_0_EBX_AVX512ER,
3703 .features[FEAT_7_0_ECX] =
3704 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
3705 .features[FEAT_7_0_EDX] =
3706 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
3707 .features[FEAT_XSAVE] =
3708 CPUID_XSAVE_XSAVEOPT,
3709 .features[FEAT_6_EAX] =
3710 CPUID_6_EAX_ARAT,
3711 .xlevel = 0x80000008,
3712 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
3713 },
3714 {
3715 .name = "Opteron_G1",
3716 .level = 5,
3717 .vendor = CPUID_VENDOR_AMD,
3718 .family = 15,
3719 .model = 6,
3720 .stepping = 1,
3721 .features[FEAT_1_EDX] =
3722 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3723 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3724 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3725 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3726 CPUID_DE | CPUID_FP87,
3727 .features[FEAT_1_ECX] =
3728 CPUID_EXT_SSE3,
3729 .features[FEAT_8000_0001_EDX] =
3730 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3731 .xlevel = 0x80000008,
3732 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
3733 },
3734 {
3735 .name = "Opteron_G2",
3736 .level = 5,
3737 .vendor = CPUID_VENDOR_AMD,
3738 .family = 15,
3739 .model = 6,
3740 .stepping = 1,
3741 .features[FEAT_1_EDX] =
3742 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3743 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3744 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3745 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3746 CPUID_DE | CPUID_FP87,
3747 .features[FEAT_1_ECX] =
3748 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
3749 .features[FEAT_8000_0001_EDX] =
3750 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3751 .features[FEAT_8000_0001_ECX] =
3752 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
3753 .xlevel = 0x80000008,
3754 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
3755 },
3756 {
3757 .name = "Opteron_G3",
3758 .level = 5,
3759 .vendor = CPUID_VENDOR_AMD,
3760 .family = 16,
3761 .model = 2,
3762 .stepping = 3,
3763 .features[FEAT_1_EDX] =
3764 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3765 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3766 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3767 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3768 CPUID_DE | CPUID_FP87,
3769 .features[FEAT_1_ECX] =
3770 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
3771 CPUID_EXT_SSE3,
3772 .features[FEAT_8000_0001_EDX] =
3773 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
3774 CPUID_EXT2_RDTSCP,
3775 .features[FEAT_8000_0001_ECX] =
3776 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
3777 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
3778 .xlevel = 0x80000008,
3779 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
3780 },
3781 {
3782 .name = "Opteron_G4",
3783 .level = 0xd,
3784 .vendor = CPUID_VENDOR_AMD,
3785 .family = 21,
3786 .model = 1,
3787 .stepping = 2,
3788 .features[FEAT_1_EDX] =
3789 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3790 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3791 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3792 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3793 CPUID_DE | CPUID_FP87,
3794 .features[FEAT_1_ECX] =
3795 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3796 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3797 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
3798 CPUID_EXT_SSE3,
3799 .features[FEAT_8000_0001_EDX] =
3800 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
3801 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
3802 .features[FEAT_8000_0001_ECX] =
3803 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
3804 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
3805 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
3806 CPUID_EXT3_LAHF_LM,
3807 .features[FEAT_SVM] =
3808 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3809 /* no xsaveopt! */
3810 .xlevel = 0x8000001A,
3811 .model_id = "AMD Opteron 62xx class CPU",
3812 },
3813 {
3814 .name = "Opteron_G5",
3815 .level = 0xd,
3816 .vendor = CPUID_VENDOR_AMD,
3817 .family = 21,
3818 .model = 2,
3819 .stepping = 0,
3820 .features[FEAT_1_EDX] =
3821 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3822 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3823 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3824 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3825 CPUID_DE | CPUID_FP87,
3826 .features[FEAT_1_ECX] =
3827 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
3828 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
3829 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
3830 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
3831 .features[FEAT_8000_0001_EDX] =
3832 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
3833 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
3834 .features[FEAT_8000_0001_ECX] =
3835 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
3836 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
3837 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
3838 CPUID_EXT3_LAHF_LM,
3839 .features[FEAT_SVM] =
3840 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3841 /* no xsaveopt! */
3842 .xlevel = 0x8000001A,
3843 .model_id = "AMD Opteron 63xx class CPU",
3844 },
3845 {
3846 .name = "EPYC",
3847 .level = 0xd,
3848 .vendor = CPUID_VENDOR_AMD,
3849 .family = 23,
3850 .model = 1,
3851 .stepping = 2,
3852 .features[FEAT_1_EDX] =
3853 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3854 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
3855 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
3856 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
3857 CPUID_VME | CPUID_FP87,
3858 .features[FEAT_1_ECX] =
3859 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
3860 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
3861 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3862 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3863 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
3864 .features[FEAT_8000_0001_EDX] =
3865 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3866 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3867 CPUID_EXT2_SYSCALL,
3868 .features[FEAT_8000_0001_ECX] =
3869 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3870 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3871 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3872 CPUID_EXT3_TOPOEXT,
3873 .features[FEAT_7_0_EBX] =
3874 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3875 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3876 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3877 CPUID_7_0_EBX_SHA_NI,
3878 /* Missing: XSAVES (not supported by some Linux versions,
3879 * including v4.1 to v4.12).
3880 * KVM doesn't yet expose any XSAVES state save component.
3881 */
3882 .features[FEAT_XSAVE] =
3883 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3884 CPUID_XSAVE_XGETBV1,
3885 .features[FEAT_6_EAX] =
3886 CPUID_6_EAX_ARAT,
3887 .features[FEAT_SVM] =
3888 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3889 .xlevel = 0x8000001E,
3890 .model_id = "AMD EPYC Processor",
3891 .cache_info = &epyc_cache_info,
3892 .versions = (X86CPUVersionDefinition[]) {
3893 { .version = 1 },
3894 {
3895 .version = 2,
3896 .alias = "EPYC-IBPB",
3897 .props = (PropValue[]) {
3898 { "ibpb", "on" },
3899 { "model-id",
3900 "AMD EPYC Processor (with IBPB)" },
3901 { /* end of list */ }
3902 }
3903 },
3904 { /* end of list */ }
3905 }
3906 },
3907 {
3908 .name = "Dhyana",
3909 .level = 0xd,
3910 .vendor = CPUID_VENDOR_HYGON,
3911 .family = 24,
3912 .model = 0,
3913 .stepping = 1,
3914 .features[FEAT_1_EDX] =
3915 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3916 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
3917 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
3918 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
3919 CPUID_VME | CPUID_FP87,
3920 .features[FEAT_1_ECX] =
3921 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
3922 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
3923 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3924 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3925 CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
3926 .features[FEAT_8000_0001_EDX] =
3927 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3928 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3929 CPUID_EXT2_SYSCALL,
3930 .features[FEAT_8000_0001_ECX] =
3931 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3932 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3933 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3934 CPUID_EXT3_TOPOEXT,
3935 .features[FEAT_8000_0008_EBX] =
3936 CPUID_8000_0008_EBX_IBPB,
3937 .features[FEAT_7_0_EBX] =
3938 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3939 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3940 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
3941 /*
3942 * Missing: XSAVES (not supported by some Linux versions,
3943 * including v4.1 to v4.12).
3944 * KVM doesn't yet expose any XSAVES state save component.
3945 */
3946 .features[FEAT_XSAVE] =
3947 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3948 CPUID_XSAVE_XGETBV1,
3949 .features[FEAT_6_EAX] =
3950 CPUID_6_EAX_ARAT,
3951 .features[FEAT_SVM] =
3952 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3953 .xlevel = 0x8000001E,
3954 .model_id = "Hygon Dhyana Processor",
3955 .cache_info = &epyc_cache_info,
3956 },
3957 };
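
/*
 * Illustrative sketch, not compiled as part of this file: applying a
 * PropValue list such as the per-version .props arrays above amounts to a
 * loop like the following, assuming the object_property_parse(obj, value,
 * name, errp) argument order used by this QEMU version:
 *
 *     static void apply_props_sketch(X86CPU *cpu, PropValue *props)
 *     {
 *         PropValue *pv;
 *
 *         for (pv = props; pv->prop; pv++) {
 *             object_property_parse(OBJECT(cpu), pv->value, pv->prop,
 *                                   &error_abort);
 *         }
 *     }
 *
 * Each .versions entry is expected to surface as a separate "<Model>-v<N>"
 * CPU type (for example "Cascadelake-Server-v3"), with any .alias string
 * ("Cascadelake-Server-noTSX") registered as an additional name.
 */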
3958
3959 /* KVM-specific features that are automatically added/removed
3960 * from all CPU models when KVM is enabled.
3961 */
3962 static PropValue kvm_default_props[] = {
3963 { "kvmclock", "on" },
3964 { "kvm-nopiodelay", "on" },
3965 { "kvm-asyncpf", "on" },
3966 { "kvm-steal-time", "on" },
3967 { "kvm-pv-eoi", "on" },
3968 { "kvmclock-stable-bit", "on" },
3969 { "x2apic", "on" },
3970 { "acpi", "off" },
3971 { "monitor", "off" },
3972 { "svm", "off" },
3973 { NULL, NULL },
3974 };
3975
3976 /* TCG-specific defaults that override all CPU models when using TCG
3977 */
3978 static PropValue tcg_default_props[] = {
3979 { "vme", "off" },
3980 { NULL, NULL },
3981 };
3982
3983
3984 X86CPUVersion default_cpu_version = CPU_VERSION_LATEST;
3985
3986 void x86_cpu_set_default_version(X86CPUVersion version)
3987 {
3988 /* The default must not be CPU_VERSION_AUTO: resolving AUTO to AUTO makes no sense */
3989 assert(version != CPU_VERSION_AUTO);
3990 default_cpu_version = version;
3991 }
3992
3993 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model)
3994 {
3995 int v = 0;
3996 const X86CPUVersionDefinition *vdef =
3997 x86_cpu_def_get_versions(model->cpudef);
3998 while (vdef->version) {
3999 v = vdef->version;
4000 vdef++;
4001 }
4002 return v;
4003 }
4004
4005 /* Return the actual version being used for a specific CPU model */
4006 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model)
4007 {
4008 X86CPUVersion v = model->version;
4009 if (v == CPU_VERSION_AUTO) {
4010 v = default_cpu_version;
4011 }
4012 if (v == CPU_VERSION_LATEST) {
4013 return x86_cpu_model_last_version(model);
4014 }
4015 return v;
4016 }
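
/*
 * Illustrative example of version resolution, based on the tables above:
 * with default_cpu_version left at CPU_VERSION_LATEST, a model whose
 * version is CPU_VERSION_AUTO first falls back to the default and then
 * resolves to the last entry of its .versions list.  For the
 * Cascadelake-Server definition above (versions 1, 2 and 3) that means:
 *
 *     X86CPUVersion v = x86_cpu_model_resolve_version(model);
 *     // v == 3
 */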
4017
4018 void x86_cpu_change_kvm_default(const char *prop, const char *value)
4019 {
4020 PropValue *pv;
4021 for (pv = kvm_default_props; pv->prop; pv++) {
4022 if (!strcmp(pv->prop, prop)) {
4023 pv->value = value;
4024 break;
4025 }
4026 }
4027
4028 /* It is valid to call this function only for properties that
4029 * are already present in the kvm_default_props table.
4030 */
4031 assert(pv->prop);
4032 }
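
/*
 * Hypothetical usage sketch of the helper above: a machine or accelerator
 * wanting a different KVM default would call, early during setup,
 *
 *     x86_cpu_change_kvm_default("kvm-asyncpf", "off");
 *
 * The property name must already be listed in kvm_default_props[];
 * anything else trips the assert above.
 */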
4033
4034 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
4035 bool migratable_only);
4036
4037 static bool lmce_supported(void)
4038 {
4039 uint64_t mce_cap = 0;
4040
4041 #ifdef CONFIG_KVM
4042 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
4043 return false;
4044 }
4045 #endif
4046
4047 return !!(mce_cap & MCG_LMCE_P);
4048 }
4049
4050 #define CPUID_MODEL_ID_SZ 48
4051
4052 /**
4053 * cpu_x86_fill_model_id:
4054 * Get CPUID model ID string from host CPU.
4055 *
4056 * @str should have at least CPUID_MODEL_ID_SZ bytes
4057 *
4058 * The function does NOT add a null terminator to the string
4059 * automatically.
4060 */
4061 static int cpu_x86_fill_model_id(char *str)
4062 {
4063 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
4064 int i;
4065
4066 for (i = 0; i < 3; i++) {
4067 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
4068 memcpy(str + i * 16 + 0, &eax, 4);
4069 memcpy(str + i * 16 + 4, &ebx, 4);
4070 memcpy(str + i * 16 + 8, &ecx, 4);
4071 memcpy(str + i * 16 + 12, &edx, 4);
4072 }
4073 return 0;
4074 }
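
/*
 * Example of safe use, mirroring max_x86_cpu_initfn() below: the caller
 * provides CPUID_MODEL_ID_SZ + 1 bytes and supplies the terminator itself,
 * since cpu_x86_fill_model_id() writes exactly 48 bytes:
 *
 *     char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
 *
 *     cpu_x86_fill_model_id(model_id);   // model_id[48] stays '\0'
 */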
4075
4076 static Property max_x86_cpu_properties[] = {
4077 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
4078 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
4079 DEFINE_PROP_END_OF_LIST()
4080 };
4081
4082 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
4083 {
4084 DeviceClass *dc = DEVICE_CLASS(oc);
4085 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4086
4087 xcc->ordering = 9;
4088
4089 xcc->model_description =
4090 "Enables all features supported by the accelerator in the current host";
4091
4092 dc->props = max_x86_cpu_properties;
4093 }
4094
4095 static void max_x86_cpu_initfn(Object *obj)
4096 {
4097 X86CPU *cpu = X86_CPU(obj);
4098 CPUX86State *env = &cpu->env;
4099 KVMState *s = kvm_state;
4100
4101 /* We can't fill the features array here because we don't know yet if
4102 * "migratable" is true or false.
4103 */
4104 cpu->max_features = true;
4105
4106 if (accel_uses_host_cpuid()) {
4107 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
4108 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
4109 int family, model, stepping;
4110
4111 host_vendor_fms(vendor, &family, &model, &stepping);
4112 cpu_x86_fill_model_id(model_id);
4113
4114 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
4115 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
4116 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
4117 object_property_set_int(OBJECT(cpu), stepping, "stepping",
4118 &error_abort);
4119 object_property_set_str(OBJECT(cpu), model_id, "model-id",
4120 &error_abort);
4121
4122 if (kvm_enabled()) {
4123 env->cpuid_min_level =
4124 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
4125 env->cpuid_min_xlevel =
4126 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
4127 env->cpuid_min_xlevel2 =
4128 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
4129 } else {
4130 env->cpuid_min_level =
4131 hvf_get_supported_cpuid(0x0, 0, R_EAX);
4132 env->cpuid_min_xlevel =
4133 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
4134 env->cpuid_min_xlevel2 =
4135 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
4136 }
4137
4138 if (lmce_supported()) {
4139 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
4140 }
4141 } else {
4142 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
4143 "vendor", &error_abort);
4144 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
4145 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
4146 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
4147 object_property_set_str(OBJECT(cpu),
4148 "QEMU TCG CPU version " QEMU_HW_VERSION,
4149 "model-id", &error_abort);
4150 }
4151
4152 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
4153 }
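
/*
 * In short: with KVM or HVF, "-cpu max" mirrors the host's vendor,
 * family/model/stepping and model-id and takes its minimum CPUID levels
 * from the accelerator; under TCG it falls back to the fixed identity set
 * above.  The properties declared in max_x86_cpu_properties can be set as
 * usual, e.g. (hypothetical invocation):
 *
 *     qemu-system-x86_64 -accel kvm -cpu max,migratable=off
 */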
4154
4155 static const TypeInfo max_x86_cpu_type_info = {
4156 .name = X86_CPU_TYPE_NAME("max"),
4157 .parent = TYPE_X86_CPU,
4158 .instance_init = max_x86_cpu_initfn,
4159 .class_init = max_x86_cpu_class_init,
4160 };
4161
4162 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
4163 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
4164 {
4165 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4166
4167 xcc->host_cpuid_required = true;
4168 xcc->ordering = 8;
4169
4170 #if defined(CONFIG_KVM)
4171 xcc->model_description =
4172 "KVM processor with all supported host features";
4173 #elif defined(CONFIG_HVF)
4174 xcc->model_description =
4175 "HVF processor with all supported host features";
4176 #endif
4177 }
4178
4179 static const TypeInfo host_x86_cpu_type_info = {
4180 .name = X86_CPU_TYPE_NAME("host"),
4181 .parent = X86_CPU_TYPE_NAME("max"),
4182 .class_init = host_x86_cpu_class_init,
4183 };
4184
4185 #endif
4186
4187 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
4188 {
4189 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
4190
4191 switch (f->type) {
4192 case CPUID_FEATURE_WORD:
4193 {
4194 const char *reg = get_register_name_32(f->cpuid.reg);
4195 assert(reg);
4196 return g_strdup_printf("CPUID.%02XH:%s",
4197 f->cpuid.eax, reg);
4198 }
4199 case MSR_FEATURE_WORD:
4200 return g_strdup_printf("MSR(%02XH)",
4201 f->msr.index);
4202 }
4203
4204 return NULL;
4205 }
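
/*
 * Example output, assuming FEAT_7_0_EBX is described by CPUID leaf 7
 * register EBX and FEAT_ARCH_CAPABILITIES by MSR index 0x10A:
 *
 *     g_autofree char *s =
 *         feature_word_description(&feature_word_info[FEAT_7_0_EBX], 0);
 *     // s == "CPUID.07H:EBX"; an MSR-based word would yield "MSR(10AH)"
 */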
4206
4207 static bool x86_cpu_have_filtered_features(X86CPU *cpu)
4208 {
4209 FeatureWord w;
4210
4211 for (w = 0; w < FEATURE_WORDS; w++) {
4212 if (cpu->filtered_features[w]) {
4213 return true;
4214 }
4215 }
4216
4217 return false;
4218 }
4219
4220 static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
4221 const char *verbose_prefix)
4222 {
4223 CPUX86State *env = &cpu->env;
4224 FeatureWordInfo *f = &feature_word_info[w];
4225 int i;
4226
4227 if (!cpu->force_features) {
4228 env->features[w] &= ~mask;
4229 }
4230 cpu->filtered_features[w] |= mask;
4231
4232 if (!verbose_prefix) {
4233 return;
4234 }
4235
4236 for (i = 0; i < 64; ++i) {
4237 if ((1ULL << i) & mask) {
4238 g_autofree char *feat_word_str = feature_word_description(f, i);
4239 warn_report("%s: %s%s%s [bit %d]",
4240 verbose_prefix,
4241 feat_word_str,
4242 f->feat_names[i] ? "." : "",
4243 f->feat_names[i] ? f->feat_names[i] : "", i);
4244 }
4245 }
4246 }
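
/*
 * Illustrative warning produced by the helper above, assuming a caller
 * passes a verbose_prefix such as "host doesn't support requested feature"
 * and that la57 is bit 16 of CPUID[EAX=7,ECX=0].ECX:
 *
 *     host doesn't support requested feature: CPUID.07H:ECX.la57 [bit 16]
 */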
4247
4248 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
4249 const char *name, void *opaque,
4250 Error **errp)
4251 {
4252 X86CPU *cpu = X86_CPU(obj);
4253 CPUX86State *env = &cpu->env;
4254 int64_t value;
4255
4256 value = (env->cpuid_version >> 8) & 0xf;
4257 if (value == 0xf) {
4258 value += (env->cpuid_version >> 20) & 0xff;
4259 }
4260 visit_type_int(v, name, &value, errp);
4261 }
4262
4263 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
4264 const char *name, void *opaque,
4265 Error **errp)
4266 {
4267 X86CPU *cpu = X86_CPU(obj);
4268 CPUX86State *env = &cpu->env;
4269 const int64_t min = 0;
4270 const int64_t max = 0xff + 0xf;
4271 Error *local_err = NULL;
4272 int64_t value;
4273
4274 visit_type_int(v, name, &value, &local_err);
4275 if (local_err) {
4276 error_propagate(errp, local_err);
4277 return;
4278 }
4279 if (value < min || value > max) {
4280 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4281 name ? name : "null", value, min, max);
4282 return;
4283 }
4284
4285 env->cpuid_version &= ~0xff00f00;
4286 if (value > 0x0f) {
4287 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
4288 } else {
4289 env->cpuid_version |= value << 8;
4290 }
4291 }
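
/*
 * Worked example of the encoding above: family 21 (0x15, the Opteron_G4/G5
 * definitions earlier in this file) does not fit the 4-bit base field, so
 * it is stored as base family 0xF plus extended family 0x15 - 0xF = 0x6:
 *
 *     env->cpuid_version |= 0xf00 | ((21 - 0x0f) << 20);   // 0x00600f00
 *
 * A family that fits, e.g. 6, is stored directly as 6 << 8 == 0x600.
 */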
4292
4293 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
4294 const char *name, void *opaque,
4295 Error **errp)
4296 {
4297 X86CPU *cpu = X86_CPU(obj);
4298 CPUX86State *env = &cpu->env;
4299 int64_t value;
4300
4301 value = (env->cpuid_version >> 4) & 0xf;
4302 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
4303 visit_type_int(v, name, &value, errp);
4304 }
4305
4306 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
4307 const char *name, void *opaque,
4308 Error **errp)
4309 {
4310 X86CPU *cpu = X86_CPU(obj);
4311 CPUX86State *env = &cpu->env;
4312 const int64_t min = 0;
4313 const int64_t max = 0xff;
4314 Error *local_err = NULL;
4315 int64_t value;
4316
4317 visit_type_int(v, name, &value, &local_err);
4318 if (local_err) {
4319 error_propagate(errp, local_err);
4320 return;
4321 }
4322 if (value < min || value > max) {
4323 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4324 name ? name : "null", value, min, max);
4325 return;
4326 }
4327
4328 env->cpuid_version &= ~0xf00f0;
4329 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
4330 }
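
/*
 * Worked example: model 85 (0x55, the Skylake/Cascadelake-Server
 * definitions above) is split into a low nibble at bits 7:4 and an
 * extended-model nibble at bits 19:16:
 *
 *     ((0x55 & 0xf) << 4) | ((0x55 >> 4) << 16) == 0x00050050
 */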
4331
4332 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
4333 const char *name, void *opaque,
4334 Error **errp)
4335 {
4336 X86CPU *cpu = X86_CPU(obj);
4337 CPUX86State *env = &cpu->env;
4338 int64_t value;
4339
4340 value = env->cpuid_version & 0xf;
4341 visit_type_int(v, name, &value, errp);
4342 }
4343
4344 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
4345 const char *name, void *opaque,
4346 Error **errp)
4347 {
4348 X86CPU *cpu = X86_CPU(obj);
4349 CPUX86State *env = &cpu->env;
4350 const int64_t min = 0;
4351 const int64_t max = 0xf;
4352 Error *local_err = NULL;
4353 int64_t value;
4354
4355 visit_type_int(v, name, &value, &local_err);
4356 if (local_err) {
4357 error_propagate(errp, local_err);
4358 return;
4359 }
4360 if (value < min || value > max) {
4361 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4362 name ? name : "null", value, min, max);
4363 return;
4364 }
4365
4366 env->cpuid_version &= ~0xf;
4367 env->cpuid_version |= value & 0xf;
4368 }
4369
4370 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
4371 {
4372 X86CPU *cpu = X86_CPU(obj);
4373 CPUX86State *env = &cpu->env;
4374 char *value;
4375
4376 value = g_malloc(CPUID_VENDOR_SZ + 1);
4377 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
4378 env->cpuid_vendor3);
4379 return value;
4380 }
4381
4382 static void x86_cpuid_set_vendor(Object *obj, const char *value,
4383 Error **errp)
4384 {
4385 X86CPU *cpu = X86_CPU(obj);
4386 CPUX86State *env = &cpu->env;
4387 int i;
4388
4389 if (strlen(value) != CPUID_VENDOR_SZ) {
4390 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
4391 return;
4392 }
4393
4394 env->cpuid_vendor1 = 0;
4395 env->cpuid_vendor2 = 0;
4396 env->cpuid_vendor3 = 0;
4397 for (i = 0; i < 4; i++) {
4398 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
4399 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
4400 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
4401 }
4402 }
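
/*
 * Worked example of the little-endian packing above, using the standard
 * 12-character vendor string "GenuineIntel":
 *
 *     env->cpuid_vendor1 == 0x756e6547   // "Genu" -> CPUID[0].EBX
 *     env->cpuid_vendor2 == 0x49656e69   // "ineI" -> CPUID[0].EDX
 *     env->cpuid_vendor3 == 0x6c65746e   // "ntel" -> CPUID[0].ECX
 */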
4403
4404 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
4405 {
4406 X86CPU *cpu = X86_CPU(obj);
4407 CPUX86State *env = &cpu->env;
4408 char *value;
4409 int i;
4410
4411 value = g_malloc(48 + 1);
4412 for (i = 0; i < 48; i++) {
4413 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
4414 }
4415 value[48] = '\0';
4416 return value;
4417 }
4418
4419 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
4420 Error **errp)
4421 {
4422 X86CPU *cpu = X86_CPU(obj);
4423 CPUX86State *env = &cpu->env;
4424 int c, len, i;
4425
4426 if (model_id == NULL) {
4427 model_id = "";
4428 }
4429 len = strlen(model_id);
4430 memset(env->cpuid_model, 0, 48);
4431 for (i = 0; i < 48; i++) {
4432 if (i >= len) {
4433 c = '\0';
4434 } else {
4435 c = (uint8_t)model_id[i];
4436 }
4437 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
4438 }
4439 }
4440
4441 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
4442 void *opaque, Error **errp)
4443 {
4444 X86CPU *cpu = X86_CPU(obj);
4445 int64_t value;
4446
4447 value = cpu->env.tsc_khz * 1000;
4448 visit_type_int(v, name, &value, errp);
4449 }
4450
4451 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
4452 void *opaque, Error **errp)
4453 {
4454 X86CPU *cpu = X86_CPU(obj);
4455 const int64_t min = 0;
4456 const int64_t max = INT64_MAX;
4457 Error *local_err = NULL;
4458 int64_t value;
4459
4460 visit_type_int(v, name, &value, &local_err);
4461 if (local_err) {
4462 error_propagate(errp, local_err);
4463 return;
4464 }
4465 if (value < min || value > max) {
4466 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4467 name ? name : "null", value, min, max);
4468 return;
4469 }
4470
4471 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
4472 }
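
/*
 * Note on units: the property value is in Hz while the internal field is
 * in kHz, so for example setting "tsc-frequency" to 2500000000 (2.5 GHz)
 * stores:
 *
 *     cpu->env.tsc_khz == 2500000
 */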
4473
4474 /* Generic getter for "feature-words" and "filtered-features" properties */
4475 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
4476 const char *name, void *opaque,
4477 Error **errp)
4478 {
4479 uint64_t *array = (uint64_t *)opaque;
4480 FeatureWord w;
4481 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
4482 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
4483 X86CPUFeatureWordInfoList *list = NULL;
4484
4485 for (w = 0; w < FEATURE_WORDS; w++) {
4486 FeatureWordInfo *wi = &feature_word_info[w];
4487 /*
4488 * We didn't have MSR features when "feature-words" was
4489 * introduced. Therefore, entries of other types are skipped here.
4490 */
4491 if (wi->type != CPUID_FEATURE_WORD) {
4492 continue;
4493 }
4494 X86CPUFeatureWordInfo *qwi = &word_infos[w];
4495 qwi->cpuid_input_eax = wi->cpuid.eax;
4496 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
4497 qwi->cpuid_input_ecx = wi->cpuid.ecx;
4498 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
4499 qwi->features = array[w];
4500
4501 /* List will be in reverse order, but order shouldn't matter */
4502 list_entries[w].next = list;
4503 list_entries[w].value = &word_infos[w];
4504 list = &list_entries[w];
4505 }
4506
4507 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
4508 }
4509
4510 /* Convert all '_' in a feature string option name to '-', so the feature
4511 * name conforms to the QOM property naming rule ('-' instead of '_').
4512 */
4513 static inline void feat2prop(char *s)
4514 {
4515 while ((s = strchr(s, '_'))) {
4516 *s = '-';
4517 }
4518 }
4519
4520 /* Return the feature property name for a feature flag bit */
4521 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
4522 {
4523 const char *name;
4524 /* XSAVE components are automatically enabled by other features,
4525 * so return the original feature name instead
4526 */
4527 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
4528 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
4529
4530 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
4531 x86_ext_save_areas[comp].bits) {
4532 w = x86_ext_save_areas[comp].feature;
4533 bitnr = ctz32(x86_ext_save_areas[comp].bits);
4534 }
4535 }
4536
4537 assert(bitnr < 64);
4538 assert(w < FEATURE_WORDS);
4539 name = feature_word_info[w].feat_names[bitnr];
4540 assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD));
4541 return name;
4542 }
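/*
 * Illustrative example (added; the exact component numbering is an
 * assumption): asking for the name of FEAT_XSAVE_COMP_LO bit 2 (the YMM
 * state component) would return "avx", the feature that enables it, since
 * XSAVE components themselves have no user-visible property name.
 */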
4543
4544 /* Compatibility hack to maintain the legacy +-feat semantics,
4545 * where +-feat overwrites any feature set by
4546 * feat=on|feat even if the latter is parsed after +-feat
4547 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
4548 */
4549 static GList *plus_features, *minus_features;
4550
4551 static gint compare_string(gconstpointer a, gconstpointer b)
4552 {
4553 return g_strcmp0(a, b);
4554 }
4555
4556 /* Parse "+feature,-feature,feature=foo" CPU feature string
4557 */
4558 static void x86_cpu_parse_featurestr(const char *typename, char *features,
4559 Error **errp)
4560 {
4561 char *featurestr; /* Single "key=value" string being parsed */
4562 static bool cpu_globals_initialized;
4563 bool ambiguous = false;
4564
4565 if (cpu_globals_initialized) {
4566 return;
4567 }
4568 cpu_globals_initialized = true;
4569
4570 if (!features) {
4571 return;
4572 }
4573
4574 for (featurestr = strtok(features, ",");
4575 featurestr;
4576 featurestr = strtok(NULL, ",")) {
4577 const char *name;
4578 const char *val = NULL;
4579 char *eq = NULL;
4580 char num[32];
4581 GlobalProperty *prop;
4582
4583 /* Compatibility syntax: */
4584 if (featurestr[0] == '+') {
4585 plus_features = g_list_append(plus_features,
4586 g_strdup(featurestr + 1));
4587 continue;
4588 } else if (featurestr[0] == '-') {
4589 minus_features = g_list_append(minus_features,
4590 g_strdup(featurestr + 1));
4591 continue;
4592 }
4593
4594 eq = strchr(featurestr, '=');
4595 if (eq) {
4596 *eq++ = 0;
4597 val = eq;
4598 } else {
4599 val = "on";
4600 }
4601
4602 feat2prop(featurestr);
4603 name = featurestr;
4604
4605 if (g_list_find_custom(plus_features, name, compare_string)) {
4606 warn_report("Ambiguous CPU model string. "
4607 "Don't mix both \"+%s\" and \"%s=%s\"",
4608 name, name, val);
4609 ambiguous = true;
4610 }
4611 if (g_list_find_custom(minus_features, name, compare_string)) {
4612 warn_report("Ambiguous CPU model string. "
4613 "Don't mix both \"-%s\" and \"%s=%s\"",
4614 name, name, val);
4615 ambiguous = true;
4616 }
4617
4618 /* Special case: */
4619 if (!strcmp(name, "tsc-freq")) {
4620 int ret;
4621 uint64_t tsc_freq;
4622
4623 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
4624 if (ret < 0 || tsc_freq > INT64_MAX) {
4625 error_setg(errp, "bad numerical value %s", val);
4626 return;
4627 }
4628 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
4629 val = num;
4630 name = "tsc-frequency";
4631 }
4632
4633 prop = g_new0(typeof(*prop), 1);
4634 prop->driver = typename;
4635 prop->property = g_strdup(name);
4636 prop->value = g_strdup(val);
4637 qdev_prop_register_global(prop);
4638 }
4639
4640 if (ambiguous) {
4641 warn_report("Compatibility of ambiguous CPU model "
4642 "strings won't be kept on future QEMU versions");
4643 }
4644 }
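/*
 * Illustrative example (hypothetical command line, not part of the original
 * source): "-cpu Haswell,+avx2,pmu=off,tsc_freq=2.5G" makes the loop above
 * append "avx2" to plus_features, register a global property "pmu"="off",
 * and rewrite "tsc_freq=2.5G" into the global property
 * "tsc-frequency"="2500000000" via qemu_strtosz_metric().
 */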
4645
4646 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
4647 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose);
4648
4649 /* Build a list with the name of all features on a feature word array */
4650 static void x86_cpu_list_feature_names(FeatureWordArray features,
4651 strList **feat_names)
4652 {
4653 FeatureWord w;
4654 strList **next = feat_names;
4655
4656 for (w = 0; w < FEATURE_WORDS; w++) {
4657 uint64_t filtered = features[w];
4658 int i;
4659 for (i = 0; i < 64; i++) {
4660 if (filtered & (1ULL << i)) {
4661 strList *new = g_new0(strList, 1);
4662 new->value = g_strdup(x86_cpu_feature_name(w, i));
4663 *next = new;
4664 next = &new->next;
4665 }
4666 }
4667 }
4668 }
4669
4670 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
4671 const char *name, void *opaque,
4672 Error **errp)
4673 {
4674 X86CPU *xc = X86_CPU(obj);
4675 strList *result = NULL;
4676
4677 x86_cpu_list_feature_names(xc->filtered_features, &result);
4678 visit_type_strList(v, "unavailable-features", &result, errp);
4679 }
4680
4681 /* Check for missing features that may prevent the CPU class from
4682 * running on the current machine and accelerator.
4683 */
4684 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
4685 strList **missing_feats)
4686 {
4687 X86CPU *xc;
4688 Error *err = NULL;
4689 strList **next = missing_feats;
4690
4691 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4692 strList *new = g_new0(strList, 1);
4693 new->value = g_strdup("kvm");
4694 *missing_feats = new;
4695 return;
4696 }
4697
4698 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
4699
4700 x86_cpu_expand_features(xc, &err);
4701 if (err) {
4702 /* Errors from x86_cpu_expand_features should never happen,
4703 * but if one does, just report the model as not
4704 * runnable at all using the "type" property.
4705 */
4706 strList *new = g_new0(strList, 1);
4707 new->value = g_strdup("type");
4708 *next = new;
4709 next = &new->next;
4710 }
4711
4712 x86_cpu_filter_features(xc, false);
4713
4714 x86_cpu_list_feature_names(xc->filtered_features, next);
4715
4716 object_unref(OBJECT(xc));
4717 }
4718
4719 /* Print all CPUID feature names in the feature set
4720 */
4721 static void listflags(GList *features)
4722 {
4723 size_t len = 0;
4724 GList *tmp;
4725
4726 for (tmp = features; tmp; tmp = tmp->next) {
4727 const char *name = tmp->data;
4728 if ((len + strlen(name) + 1) >= 75) {
4729 qemu_printf("\n");
4730 len = 0;
4731 }
4732 qemu_printf("%s%s", len == 0 ? "  " : " ", name);
4733 len += strlen(name) + 1;
4734 }
4735 qemu_printf("\n");
4736 }
4737
4738 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
4739 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
4740 {
4741 ObjectClass *class_a = (ObjectClass *)a;
4742 ObjectClass *class_b = (ObjectClass *)b;
4743 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
4744 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
4745 int ret;
4746
4747 if (cc_a->ordering != cc_b->ordering) {
4748 ret = cc_a->ordering - cc_b->ordering;
4749 } else {
4750 g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a);
4751 g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b);
4752 ret = strcmp(name_a, name_b);
4753 }
4754 return ret;
4755 }
4756
4757 static GSList *get_sorted_cpu_model_list(void)
4758 {
4759 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
4760 list = g_slist_sort(list, x86_cpu_list_compare);
4761 return list;
4762 }
4763
4764 static char *x86_cpu_class_get_model_id(X86CPUClass *xc)
4765 {
4766 Object *obj = object_new_with_class(OBJECT_CLASS(xc));
4767 char *r = object_property_get_str(obj, "model-id", &error_abort);
4768 object_unref(obj);
4769 return r;
4770 }
4771
4772 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc)
4773 {
4774 X86CPUVersion version;
4775
4776 if (!cc->model || !cc->model->is_alias) {
4777 return NULL;
4778 }
4779 version = x86_cpu_model_resolve_version(cc->model);
4780 if (version <= 0) {
4781 return NULL;
4782 }
4783 return x86_cpu_versioned_model_name(cc->model->cpudef, version);
4784 }
4785
4786 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
4787 {
4788 ObjectClass *oc = data;
4789 X86CPUClass *cc = X86_CPU_CLASS(oc);
4790 g_autofree char *name = x86_cpu_class_get_model_name(cc);
4791 g_autofree char *desc = g_strdup(cc->model_description);
4792 g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc);
4793
4794 if (!desc && alias_of) {
4795 if (cc->model && cc->model->version == CPU_VERSION_AUTO) {
4796 desc = g_strdup("(alias configured by machine type)");
4797 } else {
4798 desc = g_strdup_printf("(alias of %s)", alias_of);
4799 }
4800 }
4801 if (!desc) {
4802 desc = x86_cpu_class_get_model_id(cc);
4803 }
4804
4805 qemu_printf("x86 %-20s %-48s\n", name, desc);
4806 }
4807
4808 /* list available CPU models and flags */
4809 void x86_cpu_list(void)
4810 {
4811 int i, j;
4812 GSList *list;
4813 GList *names = NULL;
4814
4815 qemu_printf("Available CPUs:\n");
4816 list = get_sorted_cpu_model_list();
4817 g_slist_foreach(list, x86_cpu_list_entry, NULL);
4818 g_slist_free(list);
4819
4820 names = NULL;
4821 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
4822 FeatureWordInfo *fw = &feature_word_info[i];
4823 for (j = 0; j < 64; j++) {
4824 if (fw->feat_names[j]) {
4825 names = g_list_append(names, (gpointer)fw->feat_names[j]);
4826 }
4827 }
4828 }
4829
4830 names = g_list_sort(names, (GCompareFunc)strcmp);
4831
4832 qemu_printf("\nRecognized CPUID flags:\n");
4833 listflags(names);
4834 qemu_printf("\n");
4835 g_list_free(names);
4836 }
4837
4838 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
4839 {
4840 ObjectClass *oc = data;
4841 X86CPUClass *cc = X86_CPU_CLASS(oc);
4842 CpuDefinitionInfoList **cpu_list = user_data;
4843 CpuDefinitionInfoList *entry;
4844 CpuDefinitionInfo *info;
4845
4846 info = g_malloc0(sizeof(*info));
4847 info->name = x86_cpu_class_get_model_name(cc);
4848 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
4849 info->has_unavailable_features = true;
4850 info->q_typename = g_strdup(object_class_get_name(oc));
4851 info->migration_safe = cc->migration_safe;
4852 info->has_migration_safe = true;
4853 info->q_static = cc->static_model;
4854 /*
4855 * Old machine types won't report aliases, so that alias translation
4856 * doesn't break compatibility with previous QEMU versions.
4857 */
4858 if (default_cpu_version != CPU_VERSION_LEGACY) {
4859 info->alias_of = x86_cpu_class_get_alias_of(cc);
4860 info->has_alias_of = !!info->alias_of;
4861 }
4862
4863 entry = g_malloc0(sizeof(*entry));
4864 entry->value = info;
4865 entry->next = *cpu_list;
4866 *cpu_list = entry;
4867 }
4868
4869 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
4870 {
4871 CpuDefinitionInfoList *cpu_list = NULL;
4872 GSList *list = get_sorted_cpu_model_list();
4873 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
4874 g_slist_free(list);
4875 return cpu_list;
4876 }
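/*
 * Illustrative QMP exchange (example only; field values are made up):
 *
 *   -> { "execute": "query-cpu-definitions" }
 *   <- { "return": [ { "name": "Haswell",
 *                      "typename": "Haswell-x86_64-cpu",
 *                      "unavailable-features": [],
 *                      "migration-safe": true,
 *                      "static": false }, ... ] }
 *
 * The "unavailable-features" list comes from
 * x86_cpu_class_check_missing_features() above.
 */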
4877
4878 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
4879 bool migratable_only)
4880 {
4881 FeatureWordInfo *wi = &feature_word_info[w];
4882 uint64_t r = 0;
4883
4884 if (kvm_enabled()) {
4885 switch (wi->type) {
4886 case CPUID_FEATURE_WORD:
4887 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
4888 wi->cpuid.ecx,
4889 wi->cpuid.reg);
4890 break;
4891 case MSR_FEATURE_WORD:
4892 r = kvm_arch_get_supported_msr_feature(kvm_state,
4893 wi->msr.index);
4894 break;
4895 }
4896 } else if (hvf_enabled()) {
4897 if (wi->type != CPUID_FEATURE_WORD) {
4898 return 0;
4899 }
4900 r = hvf_get_supported_cpuid(wi->cpuid.eax,
4901 wi->cpuid.ecx,
4902 wi->cpuid.reg);
4903 } else if (tcg_enabled()) {
4904 r = wi->tcg_features;
4905 } else {
4906 return ~0;
4907 }
4908 if (migratable_only) {
4909 r &= x86_cpu_get_migratable_flags(w);
4910 }
4911 return r;
4912 }
4913
4914 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
4915 {
4916 PropValue *pv;
4917 for (pv = props; pv->prop; pv++) {
4918 if (!pv->value) {
4919 continue;
4920 }
4921 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
4922 &error_abort);
4923 }
4924 }
4925
4926 /* Apply properties for the CPU model version specified in model */
4927 static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
4928 {
4929 const X86CPUVersionDefinition *vdef;
4930 X86CPUVersion version = x86_cpu_model_resolve_version(model);
4931
4932 if (version == CPU_VERSION_LEGACY) {
4933 return;
4934 }
4935
4936 for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) {
4937 PropValue *p;
4938
4939 for (p = vdef->props; p && p->prop; p++) {
4940 object_property_parse(OBJECT(cpu), p->value, p->prop,
4941 &error_abort);
4942 }
4943
4944 if (vdef->version == version) {
4945 break;
4946 }
4947 }
4948
4949 /*
4950 * If we reached the end of the list, the version number was invalid
4951 */
4952 assert(vdef->version == version);
4953 }
4954
4955 /* Load data from X86CPUDefinition into a X86CPU object
4956 */
4957 static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model, Error **errp)
4958 {
4959 X86CPUDefinition *def = model->cpudef;
4960 CPUX86State *env = &cpu->env;
4961 const char *vendor;
4962 char host_vendor[CPUID_VENDOR_SZ + 1];
4963 FeatureWord w;
4964
4965 /*NOTE: any property set by this function should be returned by
4966 * x86_cpu_static_props(), so static expansion of
4967 * query-cpu-model-expansion is always complete.
4968 */
4969
4970 /* CPU models only set _minimum_ values for level/xlevel: */
4971 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
4972 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
4973
4974 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
4975 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
4976 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
4977 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
4978 for (w = 0; w < FEATURE_WORDS; w++) {
4979 env->features[w] = def->features[w];
4980 }
4981
4982 /* legacy-cache defaults to 'off' if CPU model provides cache info */
4983 cpu->legacy_cache = !def->cache_info;
4984
4985 /* Special cases not set in the X86CPUDefinition structs: */
4986 /* TODO: in-kernel irqchip for hvf */
4987 if (kvm_enabled()) {
4988 if (!kvm_irqchip_in_kernel()) {
4989 x86_cpu_change_kvm_default("x2apic", "off");
4990 }
4991
4992 x86_cpu_apply_props(cpu, kvm_default_props);
4993 } else if (tcg_enabled()) {
4994 x86_cpu_apply_props(cpu, tcg_default_props);
4995 }
4996
4997 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
4998
4999 /* sysenter isn't supported in compatibility mode on AMD,
5000 * syscall isn't supported in compatibility mode on Intel.
5001 * Normally we advertise the actual CPU vendor, but you can
5002 * override this using the 'vendor' property if you want to use
5003 * KVM's sysenter/syscall emulation in compatibility mode and
5004 * when doing cross-vendor migration.
5005 */
5006 vendor = def->vendor;
5007 if (accel_uses_host_cpuid()) {
5008 uint32_t ebx = 0, ecx = 0, edx = 0;
5009 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
5010 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
5011 vendor = host_vendor;
5012 }
5013
5014 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
5015
5016 x86_cpu_apply_version_props(cpu, model);
5017 }
5018
5019 #ifndef CONFIG_USER_ONLY
5020 /* Return a QDict containing keys for all properties that can be included
5021 * in static expansion of CPU models. All properties set by x86_cpu_load_model()
5022 * must be included in the dictionary.
5023 */
5024 static QDict *x86_cpu_static_props(void)
5025 {
5026 FeatureWord w;
5027 int i;
5028 static const char *props[] = {
5029 "min-level",
5030 "min-xlevel",
5031 "family",
5032 "model",
5033 "stepping",
5034 "model-id",
5035 "vendor",
5036 "lmce",
5037 NULL,
5038 };
5039 static QDict *d;
5040
5041 if (d) {
5042 return d;
5043 }
5044
5045 d = qdict_new();
5046 for (i = 0; props[i]; i++) {
5047 qdict_put_null(d, props[i]);
5048 }
5049
5050 for (w = 0; w < FEATURE_WORDS; w++) {
5051 FeatureWordInfo *fi = &feature_word_info[w];
5052 int bit;
5053 for (bit = 0; bit < 64; bit++) {
5054 if (!fi->feat_names[bit]) {
5055 continue;
5056 }
5057 qdict_put_null(d, fi->feat_names[bit]);
5058 }
5059 }
5060
5061 return d;
5062 }
5063
5064 /* Add an entry to the @props dict with the value of the given property. */
5065 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
5066 {
5067 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
5068 &error_abort);
5069
5070 qdict_put_obj(props, prop, value);
5071 }
5072
5073 /* Convert CPU model data from X86CPU object to a property dictionary
5074 * that can recreate exactly the same CPU model.
5075 */
5076 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
5077 {
5078 QDict *sprops = x86_cpu_static_props();
5079 const QDictEntry *e;
5080
5081 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
5082 const char *prop = qdict_entry_key(e);
5083 x86_cpu_expand_prop(cpu, props, prop);
5084 }
5085 }
5086
5087 /* Convert CPU model data from X86CPU object to a property dictionary
5088 * that can recreate exactly the same CPU model, including every
5089 * writeable QOM property.
5090 */
5091 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
5092 {
5093 ObjectPropertyIterator iter;
5094 ObjectProperty *prop;
5095
5096 object_property_iter_init(&iter, OBJECT(cpu));
5097 while ((prop = object_property_iter_next(&iter))) {
5098 /* skip read-only or write-only properties */
5099 if (!prop->get || !prop->set) {
5100 continue;
5101 }
5102
5103 /* "hotplugged" is the only property that is configurable
5104 * on the command-line but will be set differently on CPUs
5105 * created using "-cpu ... -smp ..." and by CPUs created
5106 * on the fly by x86_cpu_from_model() for querying. Skip it.
5107 */
5108 if (!strcmp(prop->name, "hotplugged")) {
5109 continue;
5110 }
5111 x86_cpu_expand_prop(cpu, props, prop->name);
5112 }
5113 }
5114
5115 static void object_apply_props(Object *obj, QDict *props, Error **errp)
5116 {
5117 const QDictEntry *prop;
5118 Error *err = NULL;
5119
5120 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
5121 object_property_set_qobject(obj, qdict_entry_value(prop),
5122 qdict_entry_key(prop), &err);
5123 if (err) {
5124 break;
5125 }
5126 }
5127
5128 error_propagate(errp, err);
5129 }
5130
5131 /* Create X86CPU object according to model+props specification */
5132 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
5133 {
5134 X86CPU *xc = NULL;
5135 X86CPUClass *xcc;
5136 Error *err = NULL;
5137
5138 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
5139 if (xcc == NULL) {
5140 error_setg(&err, "CPU model '%s' not found", model);
5141 goto out;
5142 }
5143
5144 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
5145 if (props) {
5146 object_apply_props(OBJECT(xc), props, &err);
5147 if (err) {
5148 goto out;
5149 }
5150 }
5151
5152 x86_cpu_expand_features(xc, &err);
5153 if (err) {
5154 goto out;
5155 }
5156
5157 out:
5158 if (err) {
5159 error_propagate(errp, err);
5160 object_unref(OBJECT(xc));
5161 xc = NULL;
5162 }
5163 return xc;
5164 }
5165
5166 CpuModelExpansionInfo *
5167 qmp_query_cpu_model_expansion(CpuModelExpansionType type,
5168 CpuModelInfo *model,
5169 Error **errp)
5170 {
5171 X86CPU *xc = NULL;
5172 Error *err = NULL;
5173 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
5174 QDict *props = NULL;
5175 const char *base_name;
5176
5177 xc = x86_cpu_from_model(model->name,
5178 model->has_props ?
5179 qobject_to(QDict, model->props) :
5180 NULL, &err);
5181 if (err) {
5182 goto out;
5183 }
5184
5185 props = qdict_new();
5186 ret->model = g_new0(CpuModelInfo, 1);
5187 ret->model->props = QOBJECT(props);
5188 ret->model->has_props = true;
5189
5190 switch (type) {
5191 case CPU_MODEL_EXPANSION_TYPE_STATIC:
5192 /* Static expansion will be based on "base" only */
5193 base_name = "base";
5194 x86_cpu_to_dict(xc, props);
5195 break;
5196 case CPU_MODEL_EXPANSION_TYPE_FULL:
5197 /* As we don't return every single property, full expansion needs
5198 * to keep the original model name+props, and add extra
5199 * properties on top of that.
5200 */
5201 base_name = model->name;
5202 x86_cpu_to_dict_full(xc, props);
5203 break;
5204 default:
5205 error_setg(&err, "Unsupported expansion type");
5206 goto out;
5207 }
5208
5209 x86_cpu_to_dict(xc, props);
5210
5211 ret->model->name = g_strdup(base_name);
5212
5213 out:
5214 object_unref(OBJECT(xc));
5215 if (err) {
5216 error_propagate(errp, err);
5217 qapi_free_CpuModelExpansionInfo(ret);
5218 ret = NULL;
5219 }
5220 return ret;
5221 }
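/*
 * Illustrative QMP exchange (example only; property values are made up):
 *
 *   -> { "execute": "query-cpu-model-expansion",
 *        "arguments": { "type": "static",
 *                       "model": { "name": "Haswell" } } }
 *   <- { "return": { "model": { "name": "base",
 *                               "props": { "family": 6, "avx2": true,
 *                                          ... } } } }
 *
 * Static expansion always reports the "base" model plus one explicit
 * property per x86_cpu_static_props() entry; full expansion keeps the
 * requested model name and adds every writable QOM property instead.
 */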
5222 #endif /* !CONFIG_USER_ONLY */
5223
5224 static gchar *x86_gdb_arch_name(CPUState *cs)
5225 {
5226 #ifdef TARGET_X86_64
5227 return g_strdup("i386:x86-64");
5228 #else
5229 return g_strdup("i386");
5230 #endif
5231 }
5232
5233 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
5234 {
5235 X86CPUModel *model = data;
5236 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5237
5238 xcc->model = model;
5239 xcc->migration_safe = true;
5240 }
5241
5242 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model)
5243 {
5244 g_autofree char *typename = x86_cpu_type_name(name);
5245 TypeInfo ti = {
5246 .name = typename,
5247 .parent = TYPE_X86_CPU,
5248 .class_init = x86_cpu_cpudef_class_init,
5249 .class_data = model,
5250 };
5251
5252 type_register(&ti);
5253 }
5254
5255 static void x86_register_cpudef_types(X86CPUDefinition *def)
5256 {
5257 X86CPUModel *m;
5258 const X86CPUVersionDefinition *vdef;
5259
5260 /* AMD aliases are handled at runtime based on CPUID vendor, so
5261 * they shouldn't be set in the CPU model table.
5262 */
5263 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
5264 /* catch mistakes instead of silently truncating model_id when too long */
5265 assert(def->model_id && strlen(def->model_id) <= 48);
5266
5267 /* Unversioned model: */
5268 m = g_new0(X86CPUModel, 1);
5269 m->cpudef = def;
5270 m->version = CPU_VERSION_AUTO;
5271 m->is_alias = true;
5272 x86_register_cpu_model_type(def->name, m);
5273
5274 /* Versioned models: */
5275
5276 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
5277 X86CPUModel *m = g_new0(X86CPUModel, 1);
5278 g_autofree char *name =
5279 x86_cpu_versioned_model_name(def, vdef->version);
5280 m->cpudef = def;
5281 m->version = vdef->version;
5282 x86_register_cpu_model_type(name, m);
5283
5284 if (vdef->alias) {
5285 X86CPUModel *am = g_new0(X86CPUModel, 1);
5286 am->cpudef = def;
5287 am->version = vdef->version;
5288 am->is_alias = true;
5289 x86_register_cpu_model_type(vdef->alias, am);
5290 }
5291 }
5292
5293 }
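/*
 * Illustrative example (added; the "-vN" naming comes from
 * x86_cpu_versioned_model_name() and is assumed here): a definition named
 * "Cascadelake-Server" with versions 1..3 would register the unversioned
 * alias "Cascadelake-Server" plus "Cascadelake-Server-v1", "-v2" and "-v3",
 * along with any vdef->alias names on top of that.
 */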
5294
5295 #if !defined(CONFIG_USER_ONLY)
5296
5297 void cpu_clear_apic_feature(CPUX86State *env)
5298 {
5299 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
5300 }
5301
5302 #endif /* !CONFIG_USER_ONLY */
5303
5304 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
5305 uint32_t *eax, uint32_t *ebx,
5306 uint32_t *ecx, uint32_t *edx)
5307 {
5308 X86CPU *cpu = env_archcpu(env);
5309 CPUState *cs = env_cpu(env);
5310 uint32_t die_offset;
5311 uint32_t limit;
5312 uint32_t signature[3];
5313
5314 /* Calculate & apply limits for different index ranges */
5315 if (index >= 0xC0000000) {
5316 limit = env->cpuid_xlevel2;
5317 } else if (index >= 0x80000000) {
5318 limit = env->cpuid_xlevel;
5319 } else if (index >= 0x40000000) {
5320 limit = 0x40000001;
5321 } else {
5322 limit = env->cpuid_level;
5323 }
5324
5325 if (index > limit) {
5326 /* Intel documentation states that invalid EAX input will
5327 * return the same information as EAX=cpuid_level
5328 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
5329 */
5330 index = env->cpuid_level;
5331 }
5332
5333 switch(index) {
5334 case 0:
5335 *eax = env->cpuid_level;
5336 *ebx = env->cpuid_vendor1;
5337 *edx = env->cpuid_vendor2;
5338 *ecx = env->cpuid_vendor3;
5339 break;
5340 case 1:
5341 *eax = env->cpuid_version;
5342 *ebx = (cpu->apic_id << 24) |
5343 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
5344 *ecx = env->features[FEAT_1_ECX];
5345 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
5346 *ecx |= CPUID_EXT_OSXSAVE;
5347 }
5348 *edx = env->features[FEAT_1_EDX];
5349 if (cs->nr_cores * cs->nr_threads > 1) {
5350 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
5351 *edx |= CPUID_HT;
5352 }
5353 break;
5354 case 2:
5355 /* cache info: needed for Pentium Pro compatibility */
5356 if (cpu->cache_info_passthrough) {
5357 host_cpuid(index, 0, eax, ebx, ecx, edx);
5358 break;
5359 }
5360 *eax = 1; /* Number of CPUID[EAX=2] calls required */
5361 *ebx = 0;
5362 if (!cpu->enable_l3_cache) {
5363 *ecx = 0;
5364 } else {
5365 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
5366 }
5367 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
5368 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
5369 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
5370 break;
5371 case 4:
5372 /* cache info: needed for Core compatibility */
5373 if (cpu->cache_info_passthrough) {
5374 host_cpuid(index, count, eax, ebx, ecx, edx);
5375 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
5376 *eax &= ~0xFC000000;
5377 if ((*eax & 31) && cs->nr_cores > 1) {
5378 *eax |= (cs->nr_cores - 1) << 26;
5379 }
5380 } else {
5381 *eax = 0;
5382 switch (count) {
5383 case 0: /* L1 dcache info */
5384 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
5385 1, cs->nr_cores,
5386 eax, ebx, ecx, edx);
5387 break;
5388 case 1: /* L1 icache info */
5389 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
5390 1, cs->nr_cores,
5391 eax, ebx, ecx, edx);
5392 break;
5393 case 2: /* L2 cache info */
5394 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
5395 cs->nr_threads, cs->nr_cores,
5396 eax, ebx, ecx, edx);
5397 break;
5398 case 3: /* L3 cache info */
5399 die_offset = apicid_die_offset(env->nr_dies,
5400 cs->nr_cores, cs->nr_threads);
5401 if (cpu->enable_l3_cache) {
5402 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
5403 (1 << die_offset), cs->nr_cores,
5404 eax, ebx, ecx, edx);
5405 break;
5406 }
5407 /* fall through */
5408 default: /* end of info */
5409 *eax = *ebx = *ecx = *edx = 0;
5410 break;
5411 }
5412 }
5413 break;
5414 case 5:
5415 /* MONITOR/MWAIT Leaf */
5416 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
5417 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
5418 *ecx = cpu->mwait.ecx; /* flags */
5419 *edx = cpu->mwait.edx; /* mwait substates */
5420 break;
5421 case 6:
5422 /* Thermal and Power Leaf */
5423 *eax = env->features[FEAT_6_EAX];
5424 *ebx = 0;
5425 *ecx = 0;
5426 *edx = 0;
5427 break;
5428 case 7:
5429 /* Structured Extended Feature Flags Enumeration Leaf */
5430 if (count == 0) {
5431 /* Maximum ECX value for sub-leaves */
5432 *eax = env->cpuid_level_func7;
5433 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
5434 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
5435 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
5436 *ecx |= CPUID_7_0_ECX_OSPKE;
5437 }
5438 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
5439 } else if (count == 1) {
5440 *eax = env->features[FEAT_7_1_EAX];
5441 *ebx = 0;
5442 *ecx = 0;
5443 *edx = 0;
5444 } else {
5445 *eax = 0;
5446 *ebx = 0;
5447 *ecx = 0;
5448 *edx = 0;
5449 }
5450 break;
5451 case 9:
5452 /* Direct Cache Access Information Leaf */
5453 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
5454 *ebx = 0;
5455 *ecx = 0;
5456 *edx = 0;
5457 break;
5458 case 0xA:
5459 /* Architectural Performance Monitoring Leaf */
5460 if (kvm_enabled() && cpu->enable_pmu) {
5461 KVMState *s = cs->kvm_state;
5462
5463 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
5464 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
5465 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
5466 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
5467 } else if (hvf_enabled() && cpu->enable_pmu) {
5468 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
5469 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
5470 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
5471 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
5472 } else {
5473 *eax = 0;
5474 *ebx = 0;
5475 *ecx = 0;
5476 *edx = 0;
5477 }
5478 break;
5479 case 0xB:
5480 /* Extended Topology Enumeration Leaf */
5481 if (!cpu->enable_cpuid_0xb) {
5482 *eax = *ebx = *ecx = *edx = 0;
5483 break;
5484 }
5485
5486 *ecx = count & 0xff;
5487 *edx = cpu->apic_id;
5488
5489 switch (count) {
5490 case 0:
5491 *eax = apicid_core_offset(env->nr_dies,
5492 cs->nr_cores, cs->nr_threads);
5493 *ebx = cs->nr_threads;
5494 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
5495 break;
5496 case 1:
5497 *eax = apicid_pkg_offset(env->nr_dies,
5498 cs->nr_cores, cs->nr_threads);
5499 *ebx = cs->nr_cores * cs->nr_threads;
5500 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
5501 break;
5502 default:
5503 *eax = 0;
5504 *ebx = 0;
5505 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
5506 }
5507
5508 assert(!(*eax & ~0x1f));
5509 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
5510 break;
5511 case 0x1F:
5512 /* V2 Extended Topology Enumeration Leaf */
5513 if (env->nr_dies < 2) {
5514 *eax = *ebx = *ecx = *edx = 0;
5515 break;
5516 }
5517
5518 *ecx = count & 0xff;
5519 *edx = cpu->apic_id;
5520 switch (count) {
5521 case 0:
5522 *eax = apicid_core_offset(env->nr_dies, cs->nr_cores,
5523 cs->nr_threads);
5524 *ebx = cs->nr_threads;
5525 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
5526 break;
5527 case 1:
5528 *eax = apicid_die_offset(env->nr_dies, cs->nr_cores,
5529 cs->nr_threads);
5530 *ebx = cs->nr_cores * cs->nr_threads;
5531 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
5532 break;
5533 case 2:
5534 *eax = apicid_pkg_offset(env->nr_dies, cs->nr_cores,
5535 cs->nr_threads);
5536 *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
5537 *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
5538 break;
5539 default:
5540 *eax = 0;
5541 *ebx = 0;
5542 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
5543 }
5544 assert(!(*eax & ~0x1f));
5545 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
5546 break;
5547 case 0xD: {
5548 /* Processor Extended State */
5549 *eax = 0;
5550 *ebx = 0;
5551 *ecx = 0;
5552 *edx = 0;
5553 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
5554 break;
5555 }
5556
5557 if (count == 0) {
5558 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
5559 *eax = env->features[FEAT_XSAVE_COMP_LO];
5560 *edx = env->features[FEAT_XSAVE_COMP_HI];
5561 /*
5562 * The initial value of xcr0 and ebx is 0. On hosts without KVM
5563 * commit 412a3c41 (e.g., CentOS 6), ebx stays 0 even after the
5564 * guest updates xcr0, which crashes some legacy guests
5565 * (e.g., CentOS 6). So set ebx == ecx to work around it.
5566 */
5567 *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0);
5568 } else if (count == 1) {
5569 *eax = env->features[FEAT_XSAVE];
5570 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
5571 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
5572 const ExtSaveArea *esa = &x86_ext_save_areas[count];
5573 *eax = esa->size;
5574 *ebx = esa->offset;
5575 }
5576 }
5577 break;
5578 }
5579 case 0x14: {
5580 /* Intel Processor Trace Enumeration */
5581 *eax = 0;
5582 *ebx = 0;
5583 *ecx = 0;
5584 *edx = 0;
5585 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
5586 !kvm_enabled()) {
5587 break;
5588 }
5589
5590 if (count == 0) {
5591 *eax = INTEL_PT_MAX_SUBLEAF;
5592 *ebx = INTEL_PT_MINIMAL_EBX;
5593 *ecx = INTEL_PT_MINIMAL_ECX;
5594 } else if (count == 1) {
5595 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
5596 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
5597 }
5598 break;
5599 }
5600 case 0x40000000:
5601 /*
5602 * The CPUID code in kvm_arch_init_vcpu() ignores the values
5603 * set here, but we restrict this leaf to TCG nonetheless.
5604 */
5605 if (tcg_enabled() && cpu->expose_tcg) {
5606 memcpy(signature, "TCGTCGTCGTCG", 12);
5607 *eax = 0x40000001;
5608 *ebx = signature[0];
5609 *ecx = signature[1];
5610 *edx = signature[2];
5611 } else {
5612 *eax = 0;
5613 *ebx = 0;
5614 *ecx = 0;
5615 *edx = 0;
5616 }
5617 break;
5618 case 0x40000001:
5619 *eax = 0;
5620 *ebx = 0;
5621 *ecx = 0;
5622 *edx = 0;
5623 break;
5624 case 0x80000000:
5625 *eax = env->cpuid_xlevel;
5626 *ebx = env->cpuid_vendor1;
5627 *edx = env->cpuid_vendor2;
5628 *ecx = env->cpuid_vendor3;
5629 break;
5630 case 0x80000001:
5631 *eax = env->cpuid_version;
5632 *ebx = 0;
5633 *ecx = env->features[FEAT_8000_0001_ECX];
5634 *edx = env->features[FEAT_8000_0001_EDX];
5635
5636 /* The Linux kernel checks for the CMPLegacy bit and
5637 * discards multiple thread information if it is set.
5638 * So don't set it here for Intel to make Linux guests happy.
5639 */
5640 if (cs->nr_cores * cs->nr_threads > 1) {
5641 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
5642 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
5643 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
5644 *ecx |= 1 << 1; /* CmpLegacy bit */
5645 }
5646 }
5647 break;
5648 case 0x80000002:
5649 case 0x80000003:
5650 case 0x80000004:
5651 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
5652 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
5653 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
5654 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
5655 break;
5656 case 0x80000005:
5657 /* cache info (L1 cache) */
5658 if (cpu->cache_info_passthrough) {
5659 host_cpuid(index, 0, eax, ebx, ecx, edx);
5660 break;
5661 }
5662 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
5663 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
5664 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
5665 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
5666 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
5667 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
5668 break;
5669 case 0x80000006:
5670 /* cache info (L2 cache) */
5671 if (cpu->cache_info_passthrough) {
5672 host_cpuid(index, 0, eax, ebx, ecx, edx);
5673 break;
5674 }
5675 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
5676 (L2_DTLB_2M_ENTRIES << 16) | \
5677 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
5678 (L2_ITLB_2M_ENTRIES);
5679 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
5680 (L2_DTLB_4K_ENTRIES << 16) | \
5681 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
5682 (L2_ITLB_4K_ENTRIES);
5683 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
5684 cpu->enable_l3_cache ?
5685 env->cache_info_amd.l3_cache : NULL,
5686 ecx, edx);
5687 break;
5688 case 0x80000007:
5689 *eax = 0;
5690 *ebx = 0;
5691 *ecx = 0;
5692 *edx = env->features[FEAT_8000_0007_EDX];
5693 break;
5694 case 0x80000008:
5695 /* virtual & phys address size in low 2 bytes. */
5696 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
5697 /* 64 bit processor */
5698 *eax = cpu->phys_bits; /* configurable physical bits */
5699 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
5700 *eax |= 0x00003900; /* 57 bits virtual */
5701 } else {
5702 *eax |= 0x00003000; /* 48 bits virtual */
5703 }
5704 } else {
5705 *eax = cpu->phys_bits;
5706 }
5707 *ebx = env->features[FEAT_8000_0008_EBX];
5708 *ecx = 0;
5709 *edx = 0;
5710 if (cs->nr_cores * cs->nr_threads > 1) {
5711 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
5712 }
5713 break;
5714 case 0x8000000A:
5715 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
5716 *eax = 0x00000001; /* SVM Revision */
5717 *ebx = 0x00000010; /* nr of ASIDs */
5718 *ecx = 0;
5719 *edx = env->features[FEAT_SVM]; /* optional features */
5720 } else {
5721 *eax = 0;
5722 *ebx = 0;
5723 *ecx = 0;
5724 *edx = 0;
5725 }
5726 break;
5727 case 0x8000001D:
5728 *eax = 0;
5729 if (cpu->cache_info_passthrough) {
5730 host_cpuid(index, count, eax, ebx, ecx, edx);
5731 break;
5732 }
5733 switch (count) {
5734 case 0: /* L1 dcache info */
5735 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
5736 eax, ebx, ecx, edx);
5737 break;
5738 case 1: /* L1 icache info */
5739 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
5740 eax, ebx, ecx, edx);
5741 break;
5742 case 2: /* L2 cache info */
5743 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
5744 eax, ebx, ecx, edx);
5745 break;
5746 case 3: /* L3 cache info */
5747 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
5748 eax, ebx, ecx, edx);
5749 break;
5750 default: /* end of info */
5751 *eax = *ebx = *ecx = *edx = 0;
5752 break;
5753 }
5754 break;
5755 case 0x8000001E:
5756 assert(cpu->core_id <= 255);
5757 encode_topo_cpuid8000001e(cs, cpu,
5758 eax, ebx, ecx, edx);
5759 break;
5760 case 0xC0000000:
5761 *eax = env->cpuid_xlevel2;
5762 *ebx = 0;
5763 *ecx = 0;
5764 *edx = 0;
5765 break;
5766 case 0xC0000001:
5767 /* Support for VIA CPU's CPUID instruction */
5768 *eax = env->cpuid_version;
5769 *ebx = 0;
5770 *ecx = 0;
5771 *edx = env->features[FEAT_C000_0001_EDX];
5772 break;
5773 case 0xC0000002:
5774 case 0xC0000003:
5775 case 0xC0000004:
5776 /* Reserved for the future, and now filled with zero */
5777 *eax = 0;
5778 *ebx = 0;
5779 *ecx = 0;
5780 *edx = 0;
5781 break;
5782 case 0x8000001F:
5783 *eax = sev_enabled() ? 0x2 : 0;
5784 *ebx = sev_get_cbit_position();
5785 *ebx |= sev_get_reduced_phys_bits() << 6;
5786 *ecx = 0;
5787 *edx = 0;
5788 break;
5789 default:
5790 /* reserved values: zero */
5791 *eax = 0;
5792 *ebx = 0;
5793 *ecx = 0;
5794 *edx = 0;
5795 break;
5796 }
5797 }
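/*
 * Illustrative example (added, not part of the original source): a guest
 * executing CPUID with EAX=0 hits "case 0" above and receives the vendor
 * string in EBX/EDX/ECX order, e.g. "GenuineIntel" comes back as
 * EBX=0x756e6547 ("Genu"), EDX=0x49656e69 ("ineI"), ECX=0x6c65746e ("ntel").
 */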
5798
5799 /* CPUClass::reset() */
5800 static void x86_cpu_reset(CPUState *s)
5801 {
5802 X86CPU *cpu = X86_CPU(s);
5803 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
5804 CPUX86State *env = &cpu->env;
5805 target_ulong cr4;
5806 uint64_t xcr0;
5807 int i;
5808
5809 xcc->parent_reset(s);
5810
5811 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
5812
5813 env->old_exception = -1;
5814
5815 /* init to reset state */
5816
5817 env->hflags2 |= HF2_GIF_MASK;
5818
5819 cpu_x86_update_cr0(env, 0x60000010);
5820 env->a20_mask = ~0x0;
5821 env->smbase = 0x30000;
5822 env->msr_smi_count = 0;
5823
5824 env->idt.limit = 0xffff;
5825 env->gdt.limit = 0xffff;
5826 env->ldt.limit = 0xffff;
5827 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
5828 env->tr.limit = 0xffff;
5829 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
5830
5831 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
5832 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
5833 DESC_R_MASK | DESC_A_MASK);
5834 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
5835 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5836 DESC_A_MASK);
5837 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
5838 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5839 DESC_A_MASK);
5840 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
5841 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5842 DESC_A_MASK);
5843 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
5844 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5845 DESC_A_MASK);
5846 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
5847 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5848 DESC_A_MASK);
5849
5850 env->eip = 0xfff0;
5851 env->regs[R_EDX] = env->cpuid_version;
5852
5853 env->eflags = 0x2;
5854
5855 /* FPU init */
5856 for (i = 0; i < 8; i++) {
5857 env->fptags[i] = 1;
5858 }
5859 cpu_set_fpuc(env, 0x37f);
5860
5861 env->mxcsr = 0x1f80;
5862 /* All units are in INIT state. */
5863 env->xstate_bv = 0;
5864
5865 env->pat = 0x0007040600070406ULL;
5866 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
5867 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
5868 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
5869 }
5870
5871 memset(env->dr, 0, sizeof(env->dr));
5872 env->dr[6] = DR6_FIXED_1;
5873 env->dr[7] = DR7_FIXED_1;
5874 cpu_breakpoint_remove_all(s, BP_CPU);
5875 cpu_watchpoint_remove_all(s, BP_CPU);
5876
5877 cr4 = 0;
5878 xcr0 = XSTATE_FP_MASK;
5879
5880 #ifdef CONFIG_USER_ONLY
5881 /* Enable all the features for user-mode. */
5882 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
5883 xcr0 |= XSTATE_SSE_MASK;
5884 }
5885 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
5886 const ExtSaveArea *esa = &x86_ext_save_areas[i];
5887 if (env->features[esa->feature] & esa->bits) {
5888 xcr0 |= 1ull << i;
5889 }
5890 }
5891
5892 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
5893 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
5894 }
5895 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
5896 cr4 |= CR4_FSGSBASE_MASK;
5897 }
5898 #endif
5899
5900 env->xcr0 = xcr0;
5901 cpu_x86_update_cr4(env, cr4);
5902
5903 /*
5904 * SDM 11.11.5 requires:
5905 * - IA32_MTRR_DEF_TYPE MSR.E = 0
5906 * - IA32_MTRR_PHYSMASKn.V = 0
5907 * All other bits are undefined. For simplification, zero it all.
5908 */
5909 env->mtrr_deftype = 0;
5910 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
5911 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
5912
5913 env->interrupt_injected = -1;
5914 env->exception_nr = -1;
5915 env->exception_pending = 0;
5916 env->exception_injected = 0;
5917 env->exception_has_payload = false;
5918 env->exception_payload = 0;
5919 env->nmi_injected = false;
5920 #if !defined(CONFIG_USER_ONLY)
5921 /* We hard-wire the BSP to the first CPU. */
5922 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
5923
5924 s->halted = !cpu_is_bsp(cpu);
5925
5926 if (kvm_enabled()) {
5927 kvm_arch_reset_vcpu(cpu);
5928 }
5929 else if (hvf_enabled()) {
5930 hvf_reset_vcpu(s);
5931 }
5932 #endif
5933 }
5934
5935 #ifndef CONFIG_USER_ONLY
5936 bool cpu_is_bsp(X86CPU *cpu)
5937 {
5938 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
5939 }
5940
5941 /* TODO: remove me when reset over the QOM tree is implemented */
5942 static void x86_cpu_machine_reset_cb(void *opaque)
5943 {
5944 X86CPU *cpu = opaque;
5945 cpu_reset(CPU(cpu));
5946 }
5947 #endif
5948
5949 static void mce_init(X86CPU *cpu)
5950 {
5951 CPUX86State *cenv = &cpu->env;
5952 unsigned int bank;
5953
5954 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
5955 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
5956 (CPUID_MCE | CPUID_MCA)) {
5957 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
5958 (cpu->enable_lmce ? MCG_LMCE_P : 0);
5959 cenv->mcg_ctl = ~(uint64_t)0;
5960 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
5961 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
5962 }
5963 }
5964 }
5965
5966 #ifndef CONFIG_USER_ONLY
5967 APICCommonClass *apic_get_class(void)
5968 {
5969 const char *apic_type = "apic";
5970
5971 /* TODO: in-kernel irqchip for hvf */
5972 if (kvm_apic_in_kernel()) {
5973 apic_type = "kvm-apic";
5974 } else if (xen_enabled()) {
5975 apic_type = "xen-apic";
5976 }
5977
5978 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
5979 }
5980
5981 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
5982 {
5983 APICCommonState *apic;
5984 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
5985
5986 cpu->apic_state = DEVICE(object_new_with_class(apic_class));
5987
5988 object_property_add_child(OBJECT(cpu), "lapic",
5989 OBJECT(cpu->apic_state), &error_abort);
5990 object_unref(OBJECT(cpu->apic_state));
5991
5992 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
5993 /* TODO: convert to link<> */
5994 apic = APIC_COMMON(cpu->apic_state);
5995 apic->cpu = cpu;
5996 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
5997 }
5998
5999 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
6000 {
6001 APICCommonState *apic;
6002 static bool apic_mmio_map_once;
6003
6004 if (cpu->apic_state == NULL) {
6005 return;
6006 }
6007 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
6008 errp);
6009
6010 /* Map APIC MMIO area */
6011 apic = APIC_COMMON(cpu->apic_state);
6012 if (!apic_mmio_map_once) {
6013 memory_region_add_subregion_overlap(get_system_memory(),
6014 apic->apicbase &
6015 MSR_IA32_APICBASE_BASE,
6016 &apic->io_memory,
6017 0x1000);
6018 apic_mmio_map_once = true;
6019 }
6020 }
6021
6022 static void x86_cpu_machine_done(Notifier *n, void *unused)
6023 {
6024 X86CPU *cpu = container_of(n, X86CPU, machine_done);
6025 MemoryRegion *smram =
6026 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
6027
6028 if (smram) {
6029 cpu->smram = g_new(MemoryRegion, 1);
6030 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
6031 smram, 0, 1ull << 32);
6032 memory_region_set_enabled(cpu->smram, true);
6033 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
6034 }
6035 }
6036 #else
6037 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
6038 {
6039 }
6040 #endif
6041
6042 /* Note: Only safe for use on x86(-64) hosts */
6043 static uint32_t x86_host_phys_bits(void)
6044 {
6045 uint32_t eax;
6046 uint32_t host_phys_bits;
6047
6048 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
6049 if (eax >= 0x80000008) {
6050 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
6051 /* Note: According to AMD doc 25481 rev 2.34 there is a field
6052 * at 23:16 that can specify the maximum physical address bits for
6053 * the guest and can override this value; but I've not seen
6054 * anything with that set.
6055 */
6056 host_phys_bits = eax & 0xff;
6057 } else {
6058 /* It's an odd 64-bit machine that doesn't have the leaf for
6059 * physical address bits; fall back to 36, which matches most
6060 * older Intel CPUs.
6061 */
6062 host_phys_bits = 36;
6063 }
6064
6065 return host_phys_bits;
6066 }
6067
6068 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
6069 {
6070 if (*min < value) {
6071 *min = value;
6072 }
6073 }
6074
6075 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
6076 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
6077 {
6078 CPUX86State *env = &cpu->env;
6079 FeatureWordInfo *fi = &feature_word_info[w];
6080 uint32_t eax = fi->cpuid.eax;
6081 uint32_t region = eax & 0xF0000000;
6082
6083 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
6084 if (!env->features[w]) {
6085 return;
6086 }
6087
6088 switch (region) {
6089 case 0x00000000:
6090 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
6091 break;
6092 case 0x80000000:
6093 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
6094 break;
6095 case 0xC0000000:
6096 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
6097 break;
6098 }
6099
6100 if (eax == 7) {
6101 x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7,
6102 fi->cpuid.ecx);
6103 }
6104 }
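/*
 * Illustrative example (added, not part of the original source): if any bit
 * of FEAT_8000_0008_EBX is set, its cpuid.eax is 0x80000008, the region is
 * 0x80000000, and cpuid_min_xlevel is raised to at least 0x80000008 so the
 * guest can actually reach that leaf.
 */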
6105
6106 /* Calculate XSAVE components based on the configured CPU feature flags */
6107 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
6108 {
6109 CPUX86State *env = &cpu->env;
6110 int i;
6111 uint64_t mask;
6112
6113 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
6114 return;
6115 }
6116
6117 mask = 0;
6118 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
6119 const ExtSaveArea *esa = &x86_ext_save_areas[i];
6120 if (env->features[esa->feature] & esa->bits) {
6121 mask |= (1ULL << i);
6122 }
6123 }
6124
6125 env->features[FEAT_XSAVE_COMP_LO] = mask;
6126 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
6127 }
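/*
 * Illustrative example (added; the component index is an assumption based on
 * the XSAVE state numbering): with AVX enabled, x86_ext_save_areas[2] (the
 * YMM state area) matches its enabling feature bit, so bit 2 is set in the
 * mask and reported via FEAT_XSAVE_COMP_LO in CPUID[0xD,0].
 */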
6128
6129 /***** Steps involved in loading and filtering CPUID data
6130 *
6131 * When initializing and realizing a CPU object, the steps
6132 * involved in setting up CPUID data are:
6133 *
6134 * 1) Loading CPU model definition (X86CPUDefinition). This is
6135 * implemented by x86_cpu_load_model() and should be completely
6136 * transparent, as it is done automatically by instance_init.
6137 * No code should need to look at X86CPUDefinition structs
6138 * outside instance_init.
6139 *
6140 * 2) CPU expansion. This is done by realize before CPUID
6141 * filtering, and will make sure host/accelerator data is
6142 * loaded for CPU models that depend on host capabilities
6143 * (e.g. "host"). Done by x86_cpu_expand_features().
6144 *
6145 * 3) CPUID filtering. This initializes extra data related to
6146 * CPUID, and checks if the host supports all capabilities
6147 * required by the CPU. Runnability of a CPU model is
6148 * determined at this step. Done by x86_cpu_filter_features().
6149 *
6150 * Some operations don't require all steps to be performed.
6151 * More precisely:
6152 *
6153 * - CPU instance creation (instance_init) will run only CPU
6154 * model loading. CPU expansion can't run at instance_init-time
6155 * because host/accelerator data may not be available yet.
6156 * - CPU realization will perform both CPU model expansion and CPUID
6157 * filtering, and return an error in case one of them fails.
6158 * - query-cpu-definitions needs to run all 3 steps. It needs
6159 * to run CPUID filtering, as the 'unavailable-features'
6160 * field is set based on the filtering results.
6161 * - The query-cpu-model-expansion QMP command only needs to run
6162 * CPU model loading and CPU expansion. It should not filter
6163 * any CPUID data based on host capabilities.
6164 */
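/*
 * Rough call-order sketch of the steps above (summary added for clarity,
 * not part of the original source):
 *
 *   instance_init             -> x86_cpu_load_model()
 *   realize                   -> x86_cpu_expand_features()
 *                                x86_cpu_filter_features()
 *   query-cpu-model-expansion -> x86_cpu_from_model()
 *                                  -> x86_cpu_expand_features() only
 */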
6165
6166 /* Expand CPU configuration data, based on configured features
6167 * and host/accelerator capabilities when appropriate.
6168 */
6169 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
6170 {
6171 CPUX86State *env = &cpu->env;
6172 FeatureWord w;
6173 int i;
6174 GList *l;
6175 Error *local_err = NULL;
6176
6177 for (l = plus_features; l; l = l->next) {
6178 const char *prop = l->data;
6179 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
6180 if (local_err) {
6181 goto out;
6182 }
6183 }
6184
6185 for (l = minus_features; l; l = l->next) {
6186 const char *prop = l->data;
6187 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
6188 if (local_err) {
6189 goto out;
6190 }
6191 }
6192
6193 /* TODO: cpu->max_features no longer overwrites features
6194 * set using QOM properties, so we can now convert
6195 * plus_features & minus_features to global properties
6196 * inside x86_cpu_parse_featurestr() too.
6197 */
6198 if (cpu->max_features) {
6199 for (w = 0; w < FEATURE_WORDS; w++) {
6200 /* Override only features that weren't set explicitly
6201 * by the user.
6202 */
6203 env->features[w] |=
6204 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
6205 ~env->user_features[w] & \
6206 ~feature_word_info[w].no_autoenable_flags;
6207 }
6208 }
6209
6210 for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
6211 FeatureDep *d = &feature_dependencies[i];
6212 if (!(env->features[d->from.index] & d->from.mask)) {
6213 uint64_t unavailable_features = env->features[d->to.index] & d->to.mask;
6214
6215 /* Not an error unless the dependent feature was added explicitly. */
6216 mark_unavailable_features(cpu, d->to.index,
6217 unavailable_features & env->user_features[d->to.index],
6218 "This feature depends on other features that were not requested");
6219
6220 env->user_features[d->to.index] |= unavailable_features;
6221 env->features[d->to.index] &= ~unavailable_features;
6222 }
6223 }
6224
6225 if (!kvm_enabled() || !cpu->expose_kvm) {
6226 env->features[FEAT_KVM] = 0;
6227 }
6228
6229 x86_cpu_enable_xsave_components(cpu);
6230
6231 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
6232 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
6233 if (cpu->full_cpuid_auto_level) {
6234 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
6235 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
6236 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
6237 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
6238 x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
6239 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
6240 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
6241 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
6242 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
6243 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
6244 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
6245 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
6246
6247 /* Intel Processor Trace requires CPUID[0x14] */
6248 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
6249 kvm_enabled() && cpu->intel_pt_auto_level) {
6250 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
6251 }
6252
6253 /* CPU topology with multiple-die support requires CPUID[0x1F] */
6254 if (env->nr_dies > 1) {
6255 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
6256 }
6257
6258 /* SVM requires CPUID[0x8000000A] */
6259 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
6260 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
6261 }
6262
6263 /* SEV requires CPUID[0x8000001F] */
6264 if (sev_enabled()) {
6265 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
6266 }
6267 }
6268
6269 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
6270 if (env->cpuid_level_func7 == UINT32_MAX) {
6271 env->cpuid_level_func7 = env->cpuid_min_level_func7;
6272 }
6273 if (env->cpuid_level == UINT32_MAX) {
6274 env->cpuid_level = env->cpuid_min_level;
6275 }
6276 if (env->cpuid_xlevel == UINT32_MAX) {
6277 env->cpuid_xlevel = env->cpuid_min_xlevel;
6278 }
6279 if (env->cpuid_xlevel2 == UINT32_MAX) {
6280 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
6281 }
6282
6283 out:
6284 if (local_err != NULL) {
6285 error_propagate(errp, local_err);
6286 }
6287 }
6288
6289 /*
6290 * Finishes initialization of CPUID data, filters CPU feature
6291 * words based on host availability of each feature.
6292 *
6293 * Features the host cannot provide are recorded in cpu->filtered_features.
6294 */
6295 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
6296 {
6297 CPUX86State *env = &cpu->env;
6298 FeatureWord w;
6299 const char *prefix = NULL;
6300
6301 if (verbose) {
6302 prefix = accel_uses_host_cpuid()
6303 ? "host doesn't support requested feature"
6304 : "TCG doesn't support requested feature";
6305 }
6306
6307 for (w = 0; w < FEATURE_WORDS; w++) {
6308 uint64_t host_feat =
6309 x86_cpu_get_supported_feature_word(w, false);
6310 uint64_t requested_features = env->features[w];
6311 uint64_t unavailable_features = requested_features & ~host_feat;
6312 mark_unavailable_features(cpu, w, unavailable_features, prefix);
6313 }
6314
6315 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
6316 kvm_enabled()) {
6317 KVMState *s = CPU(cpu)->kvm_state;
6318 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
6319 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
6320 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
6321 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
6322 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
6323
6324 if (!eax_0 ||
6325 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
6326 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
6327 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
6328 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
6329 INTEL_PT_ADDR_RANGES_NUM) ||
6330 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
6331 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
6332 (ecx_0 & INTEL_PT_IP_LIP)) {
6333 /*
6334 * Processor Trace capabilities aren't configurable, so if the
6335 * host can't emulate the capabilities we report on
6336 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
6337 */
6338 mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix);
6339 }
6340 }
6341 }
6342
6343 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
6344 {
6345 CPUState *cs = CPU(dev);
6346 X86CPU *cpu = X86_CPU(dev);
6347 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
6348 CPUX86State *env = &cpu->env;
6349 Error *local_err = NULL;
6350 static bool ht_warned;
6351
6352 if (xcc->host_cpuid_required) {
6353 if (!accel_uses_host_cpuid()) {
6354 g_autofree char *name = x86_cpu_class_get_model_name(xcc);
6355 error_setg(&local_err, "CPU model '%s' requires KVM", name);
6356 goto out;
6357 }
6358
6359 if (enable_cpu_pm) {
6360 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
6361 &cpu->mwait.ecx, &cpu->mwait.edx);
6362 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
6363 }
6364 }
6365
6366 /* mwait extended info: needed for Core compatibility */
6367 /* We always wake on interrupt even if the host does not have the capability */
6368 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
6369
6370 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
6371 error_setg(errp, "apic-id property was not initialized properly");
6372 return;
6373 }
6374
6375 x86_cpu_expand_features(cpu, &local_err);
6376 if (local_err) {
6377 goto out;
6378 }
6379
6380 x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid);
6381
6382 if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) {
6383 error_setg(&local_err,
6384 accel_uses_host_cpuid() ?
6385 "Host doesn't support requested features" :
6386 "TCG doesn't support requested features");
6387 goto out;
6388 }
6389
6390 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
6391 * CPUID[1].EDX.
6392 */
6393 if (IS_AMD_CPU(env)) {
6394 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
6395 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
6396 & CPUID_EXT2_AMD_ALIASES);
6397 }
6398
6399 /* For 64-bit systems, think about the number of physical bits to present.
6400 * Ideally this should be the same as the host; anything other than matching
6401 * the host can cause incorrect guest behaviour.
6402 * QEMU used to pick the magic value of 40 bits, which corresponds to
6403 * consumer AMD devices but nothing else.
6404 */
6405 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
6406 if (accel_uses_host_cpuid()) {
6407 uint32_t host_phys_bits = x86_host_phys_bits();
6408 static bool warned;
6409
6410 /* Print a warning if the user set it to a value that's not the
6411 * host value.
6412 */
6413 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
6414 !warned) {
6415 warn_report("Host physical bits (%u)"
6416 " does not match phys-bits property (%u)",
6417 host_phys_bits, cpu->phys_bits);
6418 warned = true;
6419 }
6420
6421 if (cpu->host_phys_bits) {
6422 /* The user asked for us to use the host physical bits */
6423 cpu->phys_bits = host_phys_bits;
6424 if (cpu->host_phys_bits_limit &&
6425 cpu->phys_bits > cpu->host_phys_bits_limit) {
6426 cpu->phys_bits = cpu->host_phys_bits_limit;
6427 }
6428 }
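/*
 * For illustration: with an accelerator that uses the host CPUID,
 * "-cpu host,host-phys-bits=on" copies the host's physical address
 * width, and "host-phys-bits-limit=<n>" (if set) caps it at <n> bits.
 * An explicit "phys-bits=<n>" is still range-checked below.
 */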
6429
6430 if (cpu->phys_bits &&
6431 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
6432 cpu->phys_bits < 32)) {
6433 error_setg(errp, "phys-bits should be between 32 and %u"
6434 " (but is %u)",
6435 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
6436 return;
6437 }
6438 } else {
6439 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
6440 error_setg(errp, "TCG only supports phys-bits=%u",
6441 TCG_PHYS_ADDR_BITS);
6442 return;
6443 }
6444 }
6445 /* 0 means it was not explicitly set by the user (or by machine
6446 * compat_props or by the host code above). In this case, the default
6447 * is the value used by TCG (40).
6448 */
6449 if (cpu->phys_bits == 0) {
6450 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
6451 }
6452 } else {
6453 /* For 32-bit systems, don't use a user-set value, but keep
6454 * phys_bits consistent with what we tell the guest.
6455 */
6456 if (cpu->phys_bits != 0) {
6457 error_setg(errp, "phys-bits is not user-configurable in 32-bit mode");
6458 return;
6459 }
6460
6461 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
6462 cpu->phys_bits = 36;
6463 } else {
6464 cpu->phys_bits = 32;
6465 }
6466 }
6467
6468 /* Cache information initialization */
6469 if (!cpu->legacy_cache) {
6470 if (!xcc->model || !xcc->model->cpudef->cache_info) {
6471 g_autofree char *name = x86_cpu_class_get_model_name(xcc);
6472 error_setg(errp,
6473 "CPU model '%s' doesn't support legacy-cache=off", name);
6474 return;
6475 }
6476 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
6477 *xcc->model->cpudef->cache_info;
6478 } else {
6479 /* Build legacy cache information */
6480 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
6481 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
6482 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
6483 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
6484
6485 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
6486 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
6487 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
6488 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
6489
6490 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
6491 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
6492 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
6493 env->cache_info_amd.l3_cache = &legacy_l3_cache;
6494 }
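/*
 * cache_info_cpuid2 feeds CPUID leaf 2, cache_info_cpuid4 feeds leaf 4,
 * and cache_info_amd feeds the AMD leaves (0x80000005/0x80000006 and,
 * when TOPOEXT is enabled, 0x8000001D). With legacy-cache=on all three
 * point at the hard-coded legacy_* tables used in the branch above.
 */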
6495
6497 cpu_exec_realizefn(cs, &local_err);
6498 if (local_err != NULL) {
6499 error_propagate(errp, local_err);
6500 return;
6501 }
6502
6503 #ifndef CONFIG_USER_ONLY
6504 MachineState *ms = MACHINE(qdev_get_machine());
6505 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
6506
6507 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
6508 x86_cpu_apic_create(cpu, &local_err);
6509 if (local_err != NULL) {
6510 goto out;
6511 }
6512 }
6513 #endif
6514
6515 mce_init(cpu);
6516
6517 #ifndef CONFIG_USER_ONLY
6518 if (tcg_enabled()) {
6519 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
6520 cpu->cpu_as_root = g_new(MemoryRegion, 1);
6521
6522 /* Outer container... */
6523 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
6524 memory_region_set_enabled(cpu->cpu_as_root, true);
6525
6526 /* ... with two regions inside: normal system memory with low
6527 * priority, and...
6528 */
6529 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
6530 get_system_memory(), 0, ~0ull);
6531 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
6532 memory_region_set_enabled(cpu->cpu_as_mem, true);
6533
6534 cs->num_ases = 2;
6535 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
6536 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
6537
6538 /* ... SMRAM with higher priority, linked from /machine/smram. */
6539 cpu->machine_done.notify = x86_cpu_machine_done;
6540 qemu_add_machine_init_done_notifier(&cpu->machine_done);
6541 }
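/*
 * Address space 0 ("cpu-memory") is plain system memory; address space 1
 * ("cpu-smm") is rooted at cpu_as_root, into which x86_cpu_machine_done()
 * later maps the higher-priority SMRAM region published at /machine/smram.
 */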
6542 #endif
6543
6544 qemu_init_vcpu(cs);
6545
6546 /*
6547  * Most Intel and certain AMD CPUs support hyperthreading, but AMD CPUs
6548  * without TOPOEXT do not. Even though QEMU adjusts CPUID_0000_0001_EBX
6549  * and CPUID_8000_0008_ECX based on the inputs (sockets, cores, threads),
6550  * it is still better to give users a warning.
6551  *
6552  * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
6553  * cs->nr_threads hasn't been populated yet and the check would be incorrect.
6554  */
6555 if (IS_AMD_CPU(env) &&
6556 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
6557 cs->nr_threads > 1 && !ht_warned) {
6558 warn_report("This family of AMD CPU doesn't support "
6559 "hyperthreading(%d)",
6560 cs->nr_threads);
6561 error_printf("Please configure -smp options properly"
6562 " or try enabling topoext feature.\n");
6563 ht_warned = true;
6564 }
6565
6566 x86_cpu_apic_realize(cpu, &local_err);
6567 if (local_err != NULL) {
6568 goto out;
6569 }
6570 cpu_reset(cs);
6571
6572 xcc->parent_realize(dev, &local_err);
6573
6574 out:
6575 if (local_err != NULL) {
6576 error_propagate(errp, local_err);
6577 return;
6578 }
6579 }
6580
6581 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
6582 {
6583 X86CPU *cpu = X86_CPU(dev);
6584 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
6585 Error *local_err = NULL;
6586
6587 #ifndef CONFIG_USER_ONLY
6588 cpu_remove_sync(CPU(dev));
6589 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
6590 #endif
6591
6592 if (cpu->apic_state) {
6593 object_unparent(OBJECT(cpu->apic_state));
6594 cpu->apic_state = NULL;
6595 }
6596
6597 xcc->parent_unrealize(dev, &local_err);
6598 if (local_err != NULL) {
6599 error_propagate(errp, local_err);
6600 return;
6601 }
6602 }
6603
6604 typedef struct BitProperty {
6605 FeatureWord w;
6606 uint64_t mask;
6607 } BitProperty;
6608
6609 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
6610 void *opaque, Error **errp)
6611 {
6612 X86CPU *cpu = X86_CPU(obj);
6613 BitProperty *fp = opaque;
6614 uint64_t f = cpu->env.features[fp->w];
6615 bool value = (f & fp->mask) == fp->mask;
6616 visit_type_bool(v, name, &value, errp);
6617 }
6618
6619 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
6620 void *opaque, Error **errp)
6621 {
6622 DeviceState *dev = DEVICE(obj);
6623 X86CPU *cpu = X86_CPU(obj);
6624 BitProperty *fp = opaque;
6625 Error *local_err = NULL;
6626 bool value;
6627
6628 if (dev->realized) {
6629 qdev_prop_set_after_realize(dev, name, errp);
6630 return;
6631 }
6632
6633 visit_type_bool(v, name, &value, &local_err);
6634 if (local_err) {
6635 error_propagate(errp, local_err);
6636 return;
6637 }
6638
6639 if (value) {
6640 cpu->env.features[fp->w] |= fp->mask;
6641 } else {
6642 cpu->env.features[fp->w] &= ~fp->mask;
6643 }
6644 cpu->env.user_features[fp->w] |= fp->mask;
6645 }
6646
6647 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
6648 void *opaque)
6649 {
6650 BitProperty *prop = opaque;
6651 g_free(prop);
6652 }
6653
6654 /* Register a boolean property to get/set a single bit in a uint64_t field.
6655 *
6656 * The same property name can be registered multiple times to make it affect
6657 * multiple bits in the same FeatureWord. In that case, the getter will return
6658 * true only if all bits are set.
6659 */
6660 static void x86_cpu_register_bit_prop(X86CPU *cpu,
6661 const char *prop_name,
6662 FeatureWord w,
6663 int bitnr)
6664 {
6665 BitProperty *fp;
6666 ObjectProperty *op;
6667 uint64_t mask = (1ULL << bitnr);
6668
6669 op = object_property_find(OBJECT(cpu), prop_name, NULL);
6670 if (op) {
6671 fp = op->opaque;
6672 assert(fp->w == w);
6673 fp->mask |= mask;
6674 } else {
6675 fp = g_new0(BitProperty, 1);
6676 fp->w = w;
6677 fp->mask = mask;
6678 object_property_add(OBJECT(cpu), prop_name, "bool",
6679 x86_cpu_get_bit_prop,
6680 x86_cpu_set_bit_prop,
6681 x86_cpu_release_bit_prop, fp, &error_abort);
6682 }
6683 }
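/*
 * For illustration: registering FEAT_1_EDX bit 0 under the name "fpu"
 * (as the feature_word_info table does) makes e.g. "-cpu <model>,fpu=off"
 * clear that bit and record it in user_features, marking it as explicitly
 * configured by the user.
 */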
6684
6685 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
6686 FeatureWord w,
6687 int bitnr)
6688 {
6689 FeatureWordInfo *fi = &feature_word_info[w];
6690 const char *name = fi->feat_names[bitnr];
6691
6692 if (!name) {
6693 return;
6694 }
6695
6696 /* Property names should use "-" instead of "_".
6697 * Old names containing underscores are registered as aliases
6698 * using object_property_add_alias()
6699 */
6700 assert(!strchr(name, '_'));
6701 /* aliases don't use "|" delimiters anymore; they are registered
6702 * manually using object_property_add_alias() */
6703 assert(!strchr(name, '|'));
6704 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
6705 }
6706
6707 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
6708 {
6709 X86CPU *cpu = X86_CPU(cs);
6710 CPUX86State *env = &cpu->env;
6711 GuestPanicInformation *panic_info = NULL;
6712
6713 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
6714 panic_info = g_malloc0(sizeof(GuestPanicInformation));
6715
6716 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
6717
6718 assert(HV_CRASH_PARAMS >= 5);
6719 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
6720 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
6721 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
6722 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
6723 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
6724 }
6725
6726 return panic_info;
6727 }

6728 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
6729 const char *name, void *opaque,
6730 Error **errp)
6731 {
6732 CPUState *cs = CPU(obj);
6733 GuestPanicInformation *panic_info;
6734
6735 if (!cs->crash_occurred) {
6736 error_setg(errp, "No crash occured");
6737 return;
6738 }
6739
6740 panic_info = x86_cpu_get_crash_info(cs);
6741 if (panic_info == NULL) {
6742 error_setg(errp, "No crash information");
6743 return;
6744 }
6745
6746 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
6747 errp);
6748 qapi_free_GuestPanicInformation(panic_info);
6749 }
6750
6751 static void x86_cpu_initfn(Object *obj)
6752 {
6753 X86CPU *cpu = X86_CPU(obj);
6754 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
6755 CPUX86State *env = &cpu->env;
6756 FeatureWord w;
6757
6758 env->nr_dies = 1;
6759 cpu_set_cpustate_pointers(cpu);
6760
6761 object_property_add(obj, "family", "int",
6762 x86_cpuid_version_get_family,
6763 x86_cpuid_version_set_family, NULL, NULL, NULL);
6764 object_property_add(obj, "model", "int",
6765 x86_cpuid_version_get_model,
6766 x86_cpuid_version_set_model, NULL, NULL, NULL);
6767 object_property_add(obj, "stepping", "int",
6768 x86_cpuid_version_get_stepping,
6769 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
6770 object_property_add_str(obj, "vendor",
6771 x86_cpuid_get_vendor,
6772 x86_cpuid_set_vendor, NULL);
6773 object_property_add_str(obj, "model-id",
6774 x86_cpuid_get_model_id,
6775 x86_cpuid_set_model_id, NULL);
6776 object_property_add(obj, "tsc-frequency", "int",
6777 x86_cpuid_get_tsc_freq,
6778 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
6779 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
6780 x86_cpu_get_feature_words,
6781 NULL, NULL, (void *)env->features, NULL);
6782 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
6783 x86_cpu_get_feature_words,
6784 NULL, NULL, (void *)cpu->filtered_features, NULL);
6785 /*
6786 * The "unavailable-features" property has the same semantics as
6787 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
6788 * QMP command: they list the features that would have prevented the
6789 * CPU from running if the "enforce" flag was set.
6790 */
6791 object_property_add(obj, "unavailable-features", "strList",
6792 x86_cpu_get_unavailable_features,
6793 NULL, NULL, NULL, &error_abort);
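/*
 * For example, querying this property (e.g. with the qom-get command) on
 * a CPU started without "enforce" returns the names of requested features
 * that were filtered out on this host; an empty list means the same
 * configuration would also have started with "enforce" set.
 */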
6794
6795 object_property_add(obj, "crash-information", "GuestPanicInformation",
6796 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
6797
6798 for (w = 0; w < FEATURE_WORDS; w++) {
6799 int bitnr;
6800
6801 for (bitnr = 0; bitnr < 64; bitnr++) {
6802 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
6803 }
6804 }
6805
6806 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
6807 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
6808 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
6809 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
6810 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
6811 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
6812 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
6813
6814 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
6815 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
6816 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
6817 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
6818 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
6819 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
6820 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
6821 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
6822 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
6823 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
6824 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
6825 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
6826 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
6827 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
6828 object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control",
6829 &error_abort);
6830 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
6831 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
6832 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
6833 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
6834 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
6835 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
6836 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
6837
6838 if (xcc->model) {
6839 x86_cpu_load_model(cpu, xcc->model, &error_abort);
6840 }
6841 }
6842
6843 static int64_t x86_cpu_get_arch_id(CPUState *cs)
6844 {
6845 X86CPU *cpu = X86_CPU(cs);
6846
6847 return cpu->apic_id;
6848 }
6849
6850 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
6851 {
6852 X86CPU *cpu = X86_CPU(cs);
6853
6854 return cpu->env.cr[0] & CR0_PG_MASK;
6855 }
6856
6857 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
6858 {
6859 X86CPU *cpu = X86_CPU(cs);
6860
6861 cpu->env.eip = value;
6862 }
6863
6864 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
6865 {
6866 X86CPU *cpu = X86_CPU(cs);
6867
6868 cpu->env.eip = tb->pc - tb->cs_base;
6869 }
6870
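/*
 * Return the highest-priority pending interrupt, checked in this order:
 * POLL (system emulation only), SIPI, then - if GIF is set - SMI (unless
 * already in SMM), NMI (unless blocked), MCE, external interrupts
 * (honouring V_INTR_MASKING/HIF or EFLAGS.IF and the interrupt shadow),
 * and finally VIRQ. Returns 0 if nothing is deliverable.
 */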
6871 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
6872 {
6873 X86CPU *cpu = X86_CPU(cs);
6874 CPUX86State *env = &cpu->env;
6875
6876 #if !defined(CONFIG_USER_ONLY)
6877 if (interrupt_request & CPU_INTERRUPT_POLL) {
6878 return CPU_INTERRUPT_POLL;
6879 }
6880 #endif
6881 if (interrupt_request & CPU_INTERRUPT_SIPI) {
6882 return CPU_INTERRUPT_SIPI;
6883 }
6884
6885 if (env->hflags2 & HF2_GIF_MASK) {
6886 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
6887 !(env->hflags & HF_SMM_MASK)) {
6888 return CPU_INTERRUPT_SMI;
6889 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
6890 !(env->hflags2 & HF2_NMI_MASK)) {
6891 return CPU_INTERRUPT_NMI;
6892 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
6893 return CPU_INTERRUPT_MCE;
6894 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
6895 (((env->hflags2 & HF2_VINTR_MASK) &&
6896 (env->hflags2 & HF2_HIF_MASK)) ||
6897 (!(env->hflags2 & HF2_VINTR_MASK) &&
6898 (env->eflags & IF_MASK &&
6899 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
6900 return CPU_INTERRUPT_HARD;
6901 #if !defined(CONFIG_USER_ONLY)
6902 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
6903 (env->eflags & IF_MASK) &&
6904 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
6905 return CPU_INTERRUPT_VIRQ;
6906 #endif
6907 }
6908 }
6909
6910 return 0;
6911 }
6912
6913 static bool x86_cpu_has_work(CPUState *cs)
6914 {
6915 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
6916 }
6917
6918 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
6919 {
6920 X86CPU *cpu = X86_CPU(cs);
6921 CPUX86State *env = &cpu->env;
6922
6923 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
6924 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
6925 : bfd_mach_i386_i8086);
6926 info->print_insn = print_insn_i386;
6927
6928 info->cap_arch = CS_ARCH_X86;
6929 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
6930 : env->hflags & HF_CS32_MASK ? CS_MODE_32
6931 : CS_MODE_16);
6932 info->cap_insn_unit = 1;
6933 info->cap_insn_split = 8;
6934 }
6935
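/*
 * Recompute the cached hflags bits that are derived from other CPU state:
 * CPL from SS.DPL, PE/MP/EM/TS from CR0, TF/VM/IOPL from EFLAGS, OSFXSR
 * from CR4, LMA from EFER, the CS/SS size bits from the segment
 * descriptors (or from LMA and CS.L), and ADDSEG from the mode and the
 * DS/ES/SS segment bases.
 */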
6936 void x86_update_hflags(CPUX86State *env)
6937 {
6938 uint32_t hflags;
6939 #define HFLAG_COPY_MASK \
6940 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
6941 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
6942 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
6943 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
6944
6945 hflags = env->hflags & HFLAG_COPY_MASK;
6946 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
6947 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
6948 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
6949 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
6950 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
6951
6952 if (env->cr[4] & CR4_OSFXSR_MASK) {
6953 hflags |= HF_OSFXSR_MASK;
6954 }
6955
6956 if (env->efer & MSR_EFER_LMA) {
6957 hflags |= HF_LMA_MASK;
6958 }
6959
6960 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
6961 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
6962 } else {
6963 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
6964 (DESC_B_SHIFT - HF_CS32_SHIFT);
6965 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
6966 (DESC_B_SHIFT - HF_SS32_SHIFT);
6967 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
6968 !(hflags & HF_CS32_MASK)) {
6969 hflags |= HF_ADDSEG_MASK;
6970 } else {
6971 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
6972 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
6973 }
6974 }
6975 env->hflags = hflags;
6976 }
6977
6978 static Property x86_cpu_properties[] = {
6979 #ifdef CONFIG_USER_ONLY
6980 /* apic_id = 0 by default for *-user, see commit 9886e834 */
6981 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
6982 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
6983 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
6984 DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
6985 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
6986 #else
6987 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
6988 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
6989 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
6990 DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
6991 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
6992 #endif
6993 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
6994 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
6995
6996 DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
6997 HYPERV_SPINLOCK_NEVER_RETRY),
6998 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
6999 HYPERV_FEAT_RELAXED, 0),
7000 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
7001 HYPERV_FEAT_VAPIC, 0),
7002 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
7003 HYPERV_FEAT_TIME, 0),
7004 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
7005 HYPERV_FEAT_CRASH, 0),
7006 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
7007 HYPERV_FEAT_RESET, 0),
7008 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
7009 HYPERV_FEAT_VPINDEX, 0),
7010 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
7011 HYPERV_FEAT_RUNTIME, 0),
7012 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
7013 HYPERV_FEAT_SYNIC, 0),
7014 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
7015 HYPERV_FEAT_STIMER, 0),
7016 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
7017 HYPERV_FEAT_FREQUENCIES, 0),
7018 DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
7019 HYPERV_FEAT_REENLIGHTENMENT, 0),
7020 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
7021 HYPERV_FEAT_TLBFLUSH, 0),
7022 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
7023 HYPERV_FEAT_EVMCS, 0),
7024 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
7025 HYPERV_FEAT_IPI, 0),
7026 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
7027 HYPERV_FEAT_STIMER_DIRECT, 0),
7028 DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU,
7029 hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF),
7030 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
7031
7032 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
7033 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
7034 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
7035 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
7036 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
7037 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
7038 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
7039 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
7040 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7,
7041 UINT32_MAX),
7042 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
7043 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
7044 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
7045 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
7046 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
7047 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
7048 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
7049 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
7050 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
7051 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
7052 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
7053 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
7054 false),
7055 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
7056 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
7057 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
7058 true),
7059 /*
7060 * legacy_cache defaults to true unless the CPU model provides its
7061 * own cache information (see x86_cpu_load_model()).
7062 */
7063 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
7064
7065 /*
7066 * From "Requirements for Implementing the Microsoft
7067 * Hypervisor Interface":
7068 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
7069 *
7070 * "Starting with Windows Server 2012 and Windows 8, if
7071 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
7072 * the hypervisor imposes no specific limit to the number of VPs.
7073 * In this case, Windows Server 2012 guest VMs may use more than
7074 * 64 VPs, up to the maximum supported number of processors applicable
7075 * to the specific Windows version being used."
7076 */
7077 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
7078 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
7079 false),
7080 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
7081 true),
7082 DEFINE_PROP_END_OF_LIST()
7083 };
7084
7085 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
7086 {
7087 X86CPUClass *xcc = X86_CPU_CLASS(oc);
7088 CPUClass *cc = CPU_CLASS(oc);
7089 DeviceClass *dc = DEVICE_CLASS(oc);
7090
7091 device_class_set_parent_realize(dc, x86_cpu_realizefn,
7092 &xcc->parent_realize);
7093 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
7094 &xcc->parent_unrealize);
7095 dc->props = x86_cpu_properties;
7096
7097 xcc->parent_reset = cc->reset;
7098 cc->reset = x86_cpu_reset;
7099 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
7100
7101 cc->class_by_name = x86_cpu_class_by_name;
7102 cc->parse_features = x86_cpu_parse_featurestr;
7103 cc->has_work = x86_cpu_has_work;
7104 #ifdef CONFIG_TCG
7105 cc->do_interrupt = x86_cpu_do_interrupt;
7106 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
7107 #endif
7108 cc->dump_state = x86_cpu_dump_state;
7109 cc->get_crash_info = x86_cpu_get_crash_info;
7110 cc->set_pc = x86_cpu_set_pc;
7111 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
7112 cc->gdb_read_register = x86_cpu_gdb_read_register;
7113 cc->gdb_write_register = x86_cpu_gdb_write_register;
7114 cc->get_arch_id = x86_cpu_get_arch_id;
7115 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
7116 #ifndef CONFIG_USER_ONLY
7117 cc->asidx_from_attrs = x86_asidx_from_attrs;
7118 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
7119 cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug;
7120 cc->write_elf64_note = x86_cpu_write_elf64_note;
7121 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
7122 cc->write_elf32_note = x86_cpu_write_elf32_note;
7123 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
7124 cc->vmsd = &vmstate_x86_cpu;
7125 #endif
7126 cc->gdb_arch_name = x86_gdb_arch_name;
7127 #ifdef TARGET_X86_64
7128 cc->gdb_core_xml_file = "i386-64bit.xml";
7129 cc->gdb_num_core_regs = 66;
7130 #else
7131 cc->gdb_core_xml_file = "i386-32bit.xml";
7132 cc->gdb_num_core_regs = 50;
7133 #endif
7134 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
7135 cc->debug_excp_handler = breakpoint_handler;
7136 #endif
7137 cc->cpu_exec_enter = x86_cpu_exec_enter;
7138 cc->cpu_exec_exit = x86_cpu_exec_exit;
7139 #ifdef CONFIG_TCG
7140 cc->tcg_initialize = tcg_x86_init;
7141 cc->tlb_fill = x86_cpu_tlb_fill;
7142 #endif
7143 cc->disas_set_info = x86_disas_set_info;
7144
7145 dc->user_creatable = true;
7146 }
7147
7148 static const TypeInfo x86_cpu_type_info = {
7149 .name = TYPE_X86_CPU,
7150 .parent = TYPE_CPU,
7151 .instance_size = sizeof(X86CPU),
7152 .instance_init = x86_cpu_initfn,
7153 .abstract = true,
7154 .class_size = sizeof(X86CPUClass),
7155 .class_init = x86_cpu_common_class_init,
7156 };
7157
7158
7159 /* "base" CPU model, used by query-cpu-model-expansion */
7160 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
7161 {
7162 X86CPUClass *xcc = X86_CPU_CLASS(oc);
7163
7164 xcc->static_model = true;
7165 xcc->migration_safe = true;
7166 xcc->model_description = "base CPU model type with no features enabled";
7167 xcc->ordering = 8;
7168 }
7169
7170 static const TypeInfo x86_base_cpu_type_info = {
7171 .name = X86_CPU_TYPE_NAME("base"),
7172 .parent = TYPE_X86_CPU,
7173 .class_init = x86_cpu_base_class_init,
7174 };
7175
7176 static void x86_cpu_register_types(void)
7177 {
7178 int i;
7179
7180 type_register_static(&x86_cpu_type_info);
7181 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
7182 x86_register_cpudef_types(&builtin_x86_defs[i]);
7183 }
7184 type_register_static(&max_x86_cpu_type_info);
7185 type_register_static(&x86_base_cpu_type_info);
7186 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
7187 type_register_static(&host_x86_cpu_type_info);
7188 #endif
7189 }
7190
7191 type_init(x86_cpu_register_types)