/*
 * target/i386/cpu.c — from the mirror_qemu.git tree
 * (checkout at commit "qom: Drop parameter @errp of
 * object_property_add() & friends")
 */
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24 #include "qemu/qemu-print.h"
25
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/reset.h"
30 #include "sysemu/hvf.h"
31 #include "sysemu/cpus.h"
32 #include "kvm_i386.h"
33 #include "sev_i386.h"
34
35 #include "qemu/error-report.h"
36 #include "qemu/module.h"
37 #include "qemu/option.h"
38 #include "qemu/config-file.h"
39 #include "qapi/error.h"
40 #include "qapi/qapi-visit-machine.h"
41 #include "qapi/qapi-visit-run-state.h"
42 #include "qapi/qmp/qdict.h"
43 #include "qapi/qmp/qerror.h"
44 #include "qapi/visitor.h"
45 #include "qom/qom-qobject.h"
46 #include "sysemu/arch_init.h"
47 #include "qapi/qapi-commands-machine-target.h"
48
49 #include "standard-headers/asm-x86/kvm_para.h"
50
51 #include "sysemu/sysemu.h"
52 #include "sysemu/tcg.h"
53 #include "hw/qdev-properties.h"
54 #include "hw/i386/topology.h"
55 #ifndef CONFIG_USER_ONLY
56 #include "exec/address-spaces.h"
57 #include "hw/xen/xen.h"
58 #include "hw/i386/apic_internal.h"
59 #include "hw/boards.h"
60 #endif
61
62 #include "disas/capstone.h"
63
64 /* Helpers for building CPUID[2] descriptors: */
65
/* Properties that one CPUID[2] cache-descriptor byte stands for. */
struct CPUID2CacheDescriptorInfo {
    enum CacheType type;  /* data, instruction, or unified */
    int level;            /* cache level (1, 2, or 3) */
    int size;             /* total cache size, in bytes */
    int line_size;        /* cache line size, in bytes */
    int associativity;    /* ways of associativity */
};
73
/*
 * Known CPUID 2 cache descriptors.
 * From Intel SDM Volume 2A, CPUID instruction
 *
 * The array index is the descriptor byte itself, so a reverse lookup
 * (see cpuid2_cache_descriptor()) returns the index of the matching
 * entry as the descriptor value.
 */
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
    [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 2, .line_size = 32, },
    [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
               .associativity = 6, .line_size = 64, },
    [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x22, 0x23 are not included
     */
    [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 16, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x25, 0x20 are not included
     */
    [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 4, .line_size = 64, },
    [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    /* Descriptor 0x49 depends on CPU family/model, so it is not included */
    [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 24, .line_size = 64, },
    [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
     */
    [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
               .associativity = 24, .line_size = 64, },
};
198
199 /*
200 * "CPUID leaf 2 does not report cache descriptor information,
201 * use CPUID leaf 4 to query cache parameters"
202 */
203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
204
205 /*
206 * Return a CPUID 2 cache descriptor for a given cache.
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
208 */
209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
210 {
211 int i;
212
213 assert(cache->size > 0);
214 assert(cache->level > 0);
215 assert(cache->line_size > 0);
216 assert(cache->associativity > 0);
217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
219 if (d->level == cache->level && d->type == cache->type &&
220 d->size == cache->size && d->line_size == cache->line_size &&
221 d->associativity == cache->associativity) {
222 return i;
223 }
224 }
225
226 return CACHE_DESCRIPTOR_UNAVAILABLE;
227 }
228
/* CPUID Leaf 4 constants: */

/* EAX: cache type (bits 4:0) */
#define CACHE_TYPE_D  1
#define CACHE_TYPE_I  2
#define CACHE_TYPE_UNIFIED  3

/*
 * EAX: cache level (bits 7:5).  The argument is parenthesized so that
 * expressions such as CACHE_LEVEL(x + 1) expand correctly.
 */
#define CACHE_LEVEL(l)        ((l) << 5)

/* EAX bit 8: cache is self-initializing */
#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
                       ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)
250
251
252 /* Encode cache info for CPUID[4] */
253 static void encode_cache_cpuid4(CPUCacheInfo *cache,
254 int num_apic_ids, int num_cores,
255 uint32_t *eax, uint32_t *ebx,
256 uint32_t *ecx, uint32_t *edx)
257 {
258 assert(cache->size == cache->line_size * cache->associativity *
259 cache->partitions * cache->sets);
260
261 assert(num_apic_ids > 0);
262 *eax = CACHE_TYPE(cache->type) |
263 CACHE_LEVEL(cache->level) |
264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
265 ((num_cores - 1) << 26) |
266 ((num_apic_ids - 1) << 14);
267
268 assert(cache->line_size > 0);
269 assert(cache->partitions > 0);
270 assert(cache->associativity > 0);
271 /* We don't implement fully-associative caches */
272 assert(cache->associativity < cache->sets);
273 *ebx = (cache->line_size - 1) |
274 ((cache->partitions - 1) << 12) |
275 ((cache->associativity - 1) << 22);
276
277 assert(cache->sets > 0);
278 *ecx = cache->sets - 1;
279
280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
281 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
283 }
284
285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
287 {
288 assert(cache->size % 1024 == 0);
289 assert(cache->lines_per_tag > 0);
290 assert(cache->associativity > 0);
291 assert(cache->line_size > 0);
292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
293 (cache->lines_per_tag << 8) | (cache->line_size);
294 }
295
#define ASSOC_FULL 0xFF

/*
 * AMD associativity encoding used on CPUID Leaf 0x80000006:
 * map a ways-of-associativity count to its 4-bit field encoding.
 * Returns 0 for counts that have no encoding (invalid value).
 *
 * Implemented as a function rather than a conditional-chain macro so
 * the argument is evaluated exactly once and is type-checked; the
 * AMD_ENC_ASSOC() macro is kept so existing callers are unchanged.
 */
static uint32_t amd_enc_assoc(uint32_t assoc)
{
    switch (assoc) {
    case 0:
    case 1:
        return assoc;
    case 2:
        return 0x2;
    case 4:
        return 0x4;
    case 8:
        return 0x6;
    case 16:
        return 0x8;
    case 32:
        return 0xA;
    case 48:
        return 0xB;
    case 64:
        return 0xC;
    case 96:
        return 0xD;
    case 128:
        return 0xE;
    case ASSOC_FULL:
        return 0xF;
    default:
        return 0; /* invalid value */
    }
}

#define AMD_ENC_ASSOC(a) amd_enc_assoc(a)
311
312 /*
313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
314 * @l3 can be NULL.
315 */
316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
317 CPUCacheInfo *l3,
318 uint32_t *ecx, uint32_t *edx)
319 {
320 assert(l2->size % 1024 == 0);
321 assert(l2->associativity > 0);
322 assert(l2->lines_per_tag > 0);
323 assert(l2->line_size > 0);
324 *ecx = ((l2->size / 1024) << 16) |
325 (AMD_ENC_ASSOC(l2->associativity) << 12) |
326 (l2->lines_per_tag << 8) | (l2->line_size);
327
328 if (l3) {
329 assert(l3->size % (512 * 1024) == 0);
330 assert(l3->associativity > 0);
331 assert(l3->lines_per_tag > 0);
332 assert(l3->line_size > 0);
333 *edx = ((l3->size / (512 * 1024)) << 18) |
334 (AMD_ENC_ASSOC(l3->associativity) << 12) |
335 (l3->lines_per_tag << 8) | (l3->line_size);
336 } else {
337 *edx = 0;
338 }
339 }
340
341 /* Encode cache info for CPUID[8000001D] */
342 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache,
343 X86CPUTopoInfo *topo_info,
344 uint32_t *eax, uint32_t *ebx,
345 uint32_t *ecx, uint32_t *edx)
346 {
347 uint32_t l3_cores;
348 unsigned nodes = MAX(topo_info->nodes_per_pkg, 1);
349
350 assert(cache->size == cache->line_size * cache->associativity *
351 cache->partitions * cache->sets);
352
353 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
354 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
355
356 /* L3 is shared among multiple cores */
357 if (cache->level == 3) {
358 l3_cores = DIV_ROUND_UP((topo_info->dies_per_pkg *
359 topo_info->cores_per_die *
360 topo_info->threads_per_core),
361 nodes);
362 *eax |= (l3_cores - 1) << 14;
363 } else {
364 *eax |= ((topo_info->threads_per_core - 1) << 14);
365 }
366
367 assert(cache->line_size > 0);
368 assert(cache->partitions > 0);
369 assert(cache->associativity > 0);
370 /* We don't implement fully-associative caches */
371 assert(cache->associativity < cache->sets);
372 *ebx = (cache->line_size - 1) |
373 ((cache->partitions - 1) << 12) |
374 ((cache->associativity - 1) << 22);
375
376 assert(cache->sets > 0);
377 *ecx = cache->sets - 1;
378
379 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
380 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
381 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
382 }
383
/*
 * Encode topology info for CPUID[8000001E] (AMD EPYC-style extended
 * APIC ID / core ID / node ID leaf), derived from cpu->apic_id.
 */
static void encode_topo_cpuid8000001e(X86CPUTopoInfo *topo_info, X86CPU *cpu,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
{
    X86CPUTopoIDs topo_ids = {0};
    unsigned long nodes = MAX(topo_info->nodes_per_pkg, 1);
    int shift;

    /* Decompose the APIC ID into pkg/node/core IDs (EPYC layout) */
    x86_topo_ids_from_apicid_epyc(cpu->apic_id, topo_info, &topo_ids);

    *eax = cpu->apic_id;
    /*
     * CPUID_Fn8000001E_EBX
     * 31:16 Reserved
     * 15:8  Threads per core (The number of threads per core is
     *       Threads per core + 1)
     *  7:0  Core id (see bit decoding below)
     *       SMT:
     *           4:3 node id
     *             2 Core complex id
     *           1:0 Core id
     *       Non SMT:
     *           5:4 node id
     *             3 Core complex id
     *           1:0 Core id
     */
    *ebx = ((topo_info->threads_per_core - 1) << 8) | (topo_ids.node_id << 3) |
            (topo_ids.core_id);
    /*
     * CPUID_Fn8000001E_ECX
     * 31:11 Reserved
     * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0 Node id (see bit decoding below)
     *         2  Socket id
     *       1:0  Node id
     */
    if (nodes <= 4) {
        *ecx = ((nodes - 1) << 8) | (topo_ids.pkg_id << 2) | topo_ids.node_id;
    } else {
        /*
         * Node id fix up. Actual hardware supports up to 4 nodes. But with
         * more than 32 cores, we may end up with more than 4 nodes.
         * Node id is a combination of socket id and node id. Only requirement
         * here is that this number should be unique across the system.
         * Shift the socket id to accommodate more nodes. We dont expect both
         * socket id and node id to be big number at the same time. This is not
         * an ideal config but we need to support it. Max nodes we can have
         * is 32 (255/8) with 8 cores per node and 255 max cores. We only need
         * 5 bits for nodes. Find the left most set bit to represent the total
         * number of nodes. find_last_bit returns last set bit(0 based). Left
         * shift(+1) the socket id to represent all the nodes.
         */
        nodes -= 1;
        shift = find_last_bit(&nodes, 8);
        *ecx = (nodes << 8) | (topo_ids.pkg_id << (shift + 1)) |
               topo_ids.node_id;
    }
    *edx = 0;
}
444
/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    /* sets * line_size * associativity * partitions == size */
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};


/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,
    .partitions = 1,
};

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};
554
/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512

/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
/*
 * generated packets which contain IP payloads have LIP values.
 * Use an unsigned shift: left-shifting 1 into the sign bit of a
 * signed int is undefined behavior in C.
 */
#define INTEL_PT_IP_LIP          (1u << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
605
606 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
607 uint32_t vendor2, uint32_t vendor3)
608 {
609 int i;
610 for (i = 0; i < 4; i++) {
611 dst[i] = vendor1 >> (8 * i);
612 dst[i + 4] = vendor2 >> (8 * i);
613 dst[i + 8] = vendor3 >> (8 * i);
614 }
615 dst[CPUID_VENDOR_SZ] = '\0';
616 }
617
/*
 * Feature-bit masks for the classic hardcoded CPU models, and the
 * TCG_* masks listing which bits of each feature word TCG emulation
 * supports (consumed by feature_word_info[].tcg_features below).
 */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
          CPUID_EXT_RDRAND)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C */

/* 64-bit-only bits are emulated only when QEMU was built for x86_64 */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES CPUID_SVM_NPT
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
          /* CPUID_7_0_ECX_OSPKE is dynamic */ \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_7_1_EAX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
686
/* How a feature word is read from the hardware/hypervisor */
typedef enum FeatureWordType {
    CPUID_FEATURE_WORD,  /* word comes from a CPUID leaf register */
    MSR_FEATURE_WORD,    /* word comes from an MSR */
} FeatureWordType;

/* Static description of one feature word: its source and its flags */
typedef struct FeatureWordInfo {
    FeatureWordType type;
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[64];
    union {
        /* If type==CPUID_FEATURE_WORD */
        struct {
            uint32_t eax;   /* Input EAX for CPUID */
            bool needs_ecx; /* CPUID instruction uses ECX as input */
            uint32_t ecx;   /* Input ECX value for CPUID */
            int reg;        /* output register (R_* constant) */
        } cpuid;
        /* If type==MSR_FEATURE_WORD */
        struct {
            uint32_t index; /* MSR index to read the word from */
        } msr;
    };
    uint64_t tcg_features; /* Feature flags supported by TCG */
    uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint64_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint64_t no_autoenable_flags;
} FeatureWordInfo;
719
720 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
721 [FEAT_1_EDX] = {
722 .type = CPUID_FEATURE_WORD,
723 .feat_names = {
724 "fpu", "vme", "de", "pse",
725 "tsc", "msr", "pae", "mce",
726 "cx8", "apic", NULL, "sep",
727 "mtrr", "pge", "mca", "cmov",
728 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
729 NULL, "ds" /* Intel dts */, "acpi", "mmx",
730 "fxsr", "sse", "sse2", "ss",
731 "ht" /* Intel htt */, "tm", "ia64", "pbe",
732 },
733 .cpuid = {.eax = 1, .reg = R_EDX, },
734 .tcg_features = TCG_FEATURES,
735 },
736 [FEAT_1_ECX] = {
737 .type = CPUID_FEATURE_WORD,
738 .feat_names = {
739 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
740 "ds-cpl", "vmx", "smx", "est",
741 "tm2", "ssse3", "cid", NULL,
742 "fma", "cx16", "xtpr", "pdcm",
743 NULL, "pcid", "dca", "sse4.1",
744 "sse4.2", "x2apic", "movbe", "popcnt",
745 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
746 "avx", "f16c", "rdrand", "hypervisor",
747 },
748 .cpuid = { .eax = 1, .reg = R_ECX, },
749 .tcg_features = TCG_EXT_FEATURES,
750 },
751 /* Feature names that are already defined on feature_name[] but
752 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
753 * names on feat_names below. They are copied automatically
754 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
755 */
756 [FEAT_8000_0001_EDX] = {
757 .type = CPUID_FEATURE_WORD,
758 .feat_names = {
759 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
760 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
761 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
762 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
763 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
764 "nx", NULL, "mmxext", NULL /* mmx */,
765 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
766 NULL, "lm", "3dnowext", "3dnow",
767 },
768 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
769 .tcg_features = TCG_EXT2_FEATURES,
770 },
771 [FEAT_8000_0001_ECX] = {
772 .type = CPUID_FEATURE_WORD,
773 .feat_names = {
774 "lahf-lm", "cmp-legacy", "svm", "extapic",
775 "cr8legacy", "abm", "sse4a", "misalignsse",
776 "3dnowprefetch", "osvw", "ibs", "xop",
777 "skinit", "wdt", NULL, "lwp",
778 "fma4", "tce", NULL, "nodeid-msr",
779 NULL, "tbm", "topoext", "perfctr-core",
780 "perfctr-nb", NULL, NULL, NULL,
781 NULL, NULL, NULL, NULL,
782 },
783 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
784 .tcg_features = TCG_EXT3_FEATURES,
785 /*
786 * TOPOEXT is always allowed but can't be enabled blindly by
787 * "-cpu host", as it requires consistent cache topology info
788 * to be provided so it doesn't confuse guests.
789 */
790 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
791 },
792 [FEAT_C000_0001_EDX] = {
793 .type = CPUID_FEATURE_WORD,
794 .feat_names = {
795 NULL, NULL, "xstore", "xstore-en",
796 NULL, NULL, "xcrypt", "xcrypt-en",
797 "ace2", "ace2-en", "phe", "phe-en",
798 "pmm", "pmm-en", NULL, NULL,
799 NULL, NULL, NULL, NULL,
800 NULL, NULL, NULL, NULL,
801 NULL, NULL, NULL, NULL,
802 NULL, NULL, NULL, NULL,
803 },
804 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
805 .tcg_features = TCG_EXT4_FEATURES,
806 },
807 [FEAT_KVM] = {
808 .type = CPUID_FEATURE_WORD,
809 .feat_names = {
810 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
811 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
812 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
813 "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL,
814 NULL, NULL, NULL, NULL,
815 NULL, NULL, NULL, NULL,
816 "kvmclock-stable-bit", NULL, NULL, NULL,
817 NULL, NULL, NULL, NULL,
818 },
819 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
820 .tcg_features = TCG_KVM_FEATURES,
821 },
822 [FEAT_KVM_HINTS] = {
823 .type = CPUID_FEATURE_WORD,
824 .feat_names = {
825 "kvm-hint-dedicated", NULL, NULL, NULL,
826 NULL, NULL, NULL, NULL,
827 NULL, NULL, NULL, NULL,
828 NULL, NULL, NULL, NULL,
829 NULL, NULL, NULL, NULL,
830 NULL, NULL, NULL, NULL,
831 NULL, NULL, NULL, NULL,
832 NULL, NULL, NULL, NULL,
833 },
834 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
835 .tcg_features = TCG_KVM_FEATURES,
836 /*
837 * KVM hints aren't auto-enabled by -cpu host, they need to be
838 * explicitly enabled in the command-line.
839 */
840 .no_autoenable_flags = ~0U,
841 },
842 /*
843 * .feat_names are commented out for Hyper-V enlightenments because we
844 * don't want to have two different ways for enabling them on QEMU command
845 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
846 * enabling several feature bits simultaneously, exposing these bits
847 * individually may just confuse guests.
848 */
849 [FEAT_HYPERV_EAX] = {
850 .type = CPUID_FEATURE_WORD,
851 .feat_names = {
852 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
853 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
854 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
855 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
856 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
857 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
858 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
859 NULL, NULL,
860 NULL, NULL, NULL, NULL,
861 NULL, NULL, NULL, NULL,
862 NULL, NULL, NULL, NULL,
863 NULL, NULL, NULL, NULL,
864 },
865 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
866 },
867 [FEAT_HYPERV_EBX] = {
868 .type = CPUID_FEATURE_WORD,
869 .feat_names = {
870 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
871 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
872 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
873 NULL /* hv_create_port */, NULL /* hv_connect_port */,
874 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
875 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
876 NULL, NULL,
877 NULL, NULL, NULL, NULL,
878 NULL, NULL, NULL, NULL,
879 NULL, NULL, NULL, NULL,
880 NULL, NULL, NULL, NULL,
881 },
882 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
883 },
884 [FEAT_HYPERV_EDX] = {
885 .type = CPUID_FEATURE_WORD,
886 .feat_names = {
887 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
888 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
889 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
890 NULL, NULL,
891 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
892 NULL, NULL, NULL, NULL,
893 NULL, NULL, NULL, NULL,
894 NULL, NULL, NULL, NULL,
895 NULL, NULL, NULL, NULL,
896 NULL, NULL, NULL, NULL,
897 },
898 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
899 },
900 [FEAT_HV_RECOMM_EAX] = {
901 .type = CPUID_FEATURE_WORD,
902 .feat_names = {
903 NULL /* hv_recommend_pv_as_switch */,
904 NULL /* hv_recommend_pv_tlbflush_local */,
905 NULL /* hv_recommend_pv_tlbflush_remote */,
906 NULL /* hv_recommend_msr_apic_access */,
907 NULL /* hv_recommend_msr_reset */,
908 NULL /* hv_recommend_relaxed_timing */,
909 NULL /* hv_recommend_dma_remapping */,
910 NULL /* hv_recommend_int_remapping */,
911 NULL /* hv_recommend_x2apic_msrs */,
912 NULL /* hv_recommend_autoeoi_deprecation */,
913 NULL /* hv_recommend_pv_ipi */,
914 NULL /* hv_recommend_ex_hypercalls */,
915 NULL /* hv_hypervisor_is_nested */,
916 NULL /* hv_recommend_int_mbec */,
917 NULL /* hv_recommend_evmcs */,
918 NULL,
919 NULL, NULL, NULL, NULL,
920 NULL, NULL, NULL, NULL,
921 NULL, NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 },
924 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
925 },
926 [FEAT_HV_NESTED_EAX] = {
927 .type = CPUID_FEATURE_WORD,
928 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
929 },
930 [FEAT_SVM] = {
931 .type = CPUID_FEATURE_WORD,
932 .feat_names = {
933 "npt", "lbrv", "svm-lock", "nrip-save",
934 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
935 NULL, NULL, "pause-filter", NULL,
936 "pfthreshold", NULL, NULL, NULL,
937 NULL, NULL, NULL, NULL,
938 NULL, NULL, NULL, NULL,
939 NULL, NULL, NULL, NULL,
940 NULL, NULL, NULL, NULL,
941 },
942 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
943 .tcg_features = TCG_SVM_FEATURES,
944 },
945 [FEAT_7_0_EBX] = {
946 .type = CPUID_FEATURE_WORD,
947 .feat_names = {
948 "fsgsbase", "tsc-adjust", NULL, "bmi1",
949 "hle", "avx2", NULL, "smep",
950 "bmi2", "erms", "invpcid", "rtm",
951 NULL, NULL, "mpx", NULL,
952 "avx512f", "avx512dq", "rdseed", "adx",
953 "smap", "avx512ifma", "pcommit", "clflushopt",
954 "clwb", "intel-pt", "avx512pf", "avx512er",
955 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
956 },
957 .cpuid = {
958 .eax = 7,
959 .needs_ecx = true, .ecx = 0,
960 .reg = R_EBX,
961 },
962 .tcg_features = TCG_7_0_EBX_FEATURES,
963 },
964 [FEAT_7_0_ECX] = {
965 .type = CPUID_FEATURE_WORD,
966 .feat_names = {
967 NULL, "avx512vbmi", "umip", "pku",
968 NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL,
969 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
970 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
971 "la57", NULL, NULL, NULL,
972 NULL, NULL, "rdpid", NULL,
973 NULL, "cldemote", NULL, "movdiri",
974 "movdir64b", NULL, NULL, NULL,
975 },
976 .cpuid = {
977 .eax = 7,
978 .needs_ecx = true, .ecx = 0,
979 .reg = R_ECX,
980 },
981 .tcg_features = TCG_7_0_ECX_FEATURES,
982 },
983 [FEAT_7_0_EDX] = {
984 .type = CPUID_FEATURE_WORD,
985 .feat_names = {
986 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
987 NULL, NULL, NULL, NULL,
988 NULL, NULL, "md-clear", NULL,
989 NULL, NULL, NULL, NULL,
990 NULL, NULL, NULL /* pconfig */, NULL,
991 NULL, NULL, NULL, NULL,
992 NULL, NULL, "spec-ctrl", "stibp",
993 NULL, "arch-capabilities", "core-capability", "ssbd",
994 },
995 .cpuid = {
996 .eax = 7,
997 .needs_ecx = true, .ecx = 0,
998 .reg = R_EDX,
999 },
1000 .tcg_features = TCG_7_0_EDX_FEATURES,
1001 },
1002 [FEAT_7_1_EAX] = {
1003 .type = CPUID_FEATURE_WORD,
1004 .feat_names = {
1005 NULL, NULL, NULL, NULL,
1006 NULL, "avx512-bf16", NULL, NULL,
1007 NULL, NULL, NULL, NULL,
1008 NULL, NULL, NULL, NULL,
1009 NULL, NULL, NULL, NULL,
1010 NULL, NULL, NULL, NULL,
1011 NULL, NULL, NULL, NULL,
1012 NULL, NULL, NULL, NULL,
1013 },
1014 .cpuid = {
1015 .eax = 7,
1016 .needs_ecx = true, .ecx = 1,
1017 .reg = R_EAX,
1018 },
1019 .tcg_features = TCG_7_1_EAX_FEATURES,
1020 },
1021 [FEAT_8000_0007_EDX] = {
1022 .type = CPUID_FEATURE_WORD,
1023 .feat_names = {
1024 NULL, NULL, NULL, NULL,
1025 NULL, NULL, NULL, NULL,
1026 "invtsc", NULL, NULL, NULL,
1027 NULL, NULL, NULL, NULL,
1028 NULL, NULL, NULL, NULL,
1029 NULL, NULL, NULL, NULL,
1030 NULL, NULL, NULL, NULL,
1031 NULL, NULL, NULL, NULL,
1032 },
1033 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1034 .tcg_features = TCG_APM_FEATURES,
1035 .unmigratable_flags = CPUID_APM_INVTSC,
1036 },
1037 [FEAT_8000_0008_EBX] = {
1038 .type = CPUID_FEATURE_WORD,
1039 .feat_names = {
1040 "clzero", NULL, "xsaveerptr", NULL,
1041 NULL, NULL, NULL, NULL,
1042 NULL, "wbnoinvd", NULL, NULL,
1043 "ibpb", NULL, NULL, "amd-stibp",
1044 NULL, NULL, NULL, NULL,
1045 NULL, NULL, NULL, NULL,
1046 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1047 NULL, NULL, NULL, NULL,
1048 },
1049 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1050 .tcg_features = 0,
1051 .unmigratable_flags = 0,
1052 },
1053 [FEAT_XSAVE] = {
1054 .type = CPUID_FEATURE_WORD,
1055 .feat_names = {
1056 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1057 NULL, NULL, NULL, NULL,
1058 NULL, NULL, NULL, NULL,
1059 NULL, NULL, NULL, NULL,
1060 NULL, NULL, NULL, NULL,
1061 NULL, NULL, NULL, NULL,
1062 NULL, NULL, NULL, NULL,
1063 NULL, NULL, NULL, NULL,
1064 },
1065 .cpuid = {
1066 .eax = 0xd,
1067 .needs_ecx = true, .ecx = 1,
1068 .reg = R_EAX,
1069 },
1070 .tcg_features = TCG_XSAVE_FEATURES,
1071 },
1072 [FEAT_6_EAX] = {
1073 .type = CPUID_FEATURE_WORD,
1074 .feat_names = {
1075 NULL, NULL, "arat", NULL,
1076 NULL, NULL, NULL, NULL,
1077 NULL, NULL, NULL, NULL,
1078 NULL, NULL, NULL, NULL,
1079 NULL, NULL, NULL, NULL,
1080 NULL, NULL, NULL, NULL,
1081 NULL, NULL, NULL, NULL,
1082 NULL, NULL, NULL, NULL,
1083 },
1084 .cpuid = { .eax = 6, .reg = R_EAX, },
1085 .tcg_features = TCG_6_EAX_FEATURES,
1086 },
1087 [FEAT_XSAVE_COMP_LO] = {
1088 .type = CPUID_FEATURE_WORD,
1089 .cpuid = {
1090 .eax = 0xD,
1091 .needs_ecx = true, .ecx = 0,
1092 .reg = R_EAX,
1093 },
1094 .tcg_features = ~0U,
1095 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1096 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1097 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1098 XSTATE_PKRU_MASK,
1099 },
1100 [FEAT_XSAVE_COMP_HI] = {
1101 .type = CPUID_FEATURE_WORD,
1102 .cpuid = {
1103 .eax = 0xD,
1104 .needs_ecx = true, .ecx = 0,
1105 .reg = R_EDX,
1106 },
1107 .tcg_features = ~0U,
1108 },
1109 /*Below are MSR exposed features*/
1110 [FEAT_ARCH_CAPABILITIES] = {
1111 .type = MSR_FEATURE_WORD,
1112 .feat_names = {
1113 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1114 "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl",
1115 "taa-no", NULL, NULL, NULL,
1116 NULL, NULL, NULL, NULL,
1117 NULL, NULL, NULL, NULL,
1118 NULL, NULL, NULL, NULL,
1119 NULL, NULL, NULL, NULL,
1120 NULL, NULL, NULL, NULL,
1121 },
1122 .msr = {
1123 .index = MSR_IA32_ARCH_CAPABILITIES,
1124 },
1125 },
1126 [FEAT_CORE_CAPABILITY] = {
1127 .type = MSR_FEATURE_WORD,
1128 .feat_names = {
1129 NULL, NULL, NULL, NULL,
1130 NULL, "split-lock-detect", NULL, NULL,
1131 NULL, NULL, NULL, NULL,
1132 NULL, NULL, NULL, NULL,
1133 NULL, NULL, NULL, NULL,
1134 NULL, NULL, NULL, NULL,
1135 NULL, NULL, NULL, NULL,
1136 NULL, NULL, NULL, NULL,
1137 },
1138 .msr = {
1139 .index = MSR_IA32_CORE_CAPABILITY,
1140 },
1141 },
1142
1143 [FEAT_VMX_PROCBASED_CTLS] = {
1144 .type = MSR_FEATURE_WORD,
1145 .feat_names = {
1146 NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset",
1147 NULL, NULL, NULL, "vmx-hlt-exit",
1148 NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit",
1149 "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit",
1150 "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit",
1151 "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit",
1152 "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf",
1153 "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls",
1154 },
1155 .msr = {
1156 .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
1157 }
1158 },
1159
1160 [FEAT_VMX_SECONDARY_CTLS] = {
1161 .type = MSR_FEATURE_WORD,
1162 .feat_names = {
1163 "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit",
1164 "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest",
1165 "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit",
1166 "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit",
1167 "vmx-rdseed-exit", "vmx-pml", NULL, NULL,
1168 "vmx-xsaves", NULL, NULL, NULL,
1169 NULL, NULL, NULL, NULL,
1170 NULL, NULL, NULL, NULL,
1171 },
1172 .msr = {
1173 .index = MSR_IA32_VMX_PROCBASED_CTLS2,
1174 }
1175 },
1176
1177 [FEAT_VMX_PINBASED_CTLS] = {
1178 .type = MSR_FEATURE_WORD,
1179 .feat_names = {
1180 "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit",
1181 NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr",
1182 NULL, NULL, NULL, NULL,
1183 NULL, NULL, NULL, NULL,
1184 NULL, NULL, NULL, NULL,
1185 NULL, NULL, NULL, NULL,
1186 NULL, NULL, NULL, NULL,
1187 NULL, NULL, NULL, NULL,
1188 },
1189 .msr = {
1190 .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS,
1191 }
1192 },
1193
1194 [FEAT_VMX_EXIT_CTLS] = {
1195 .type = MSR_FEATURE_WORD,
1196 /*
1197 * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from
1198 * the LM CPUID bit.
1199 */
1200 .feat_names = {
1201 NULL, NULL, "vmx-exit-nosave-debugctl", NULL,
1202 NULL, NULL, NULL, NULL,
1203 NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL,
1204 "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr",
1205 NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat",
1206 "vmx-exit-save-efer", "vmx-exit-load-efer",
1207 "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs",
1208 NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL,
1209 NULL, NULL, NULL, NULL,
1210 },
1211 .msr = {
1212 .index = MSR_IA32_VMX_TRUE_EXIT_CTLS,
1213 }
1214 },
1215
1216 [FEAT_VMX_ENTRY_CTLS] = {
1217 .type = MSR_FEATURE_WORD,
1218 .feat_names = {
1219 NULL, NULL, "vmx-entry-noload-debugctl", NULL,
1220 NULL, NULL, NULL, NULL,
1221 NULL, "vmx-entry-ia32e-mode", NULL, NULL,
1222 NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer",
1223 "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL,
1224 NULL, NULL, NULL, NULL,
1225 NULL, NULL, NULL, NULL,
1226 NULL, NULL, NULL, NULL,
1227 },
1228 .msr = {
1229 .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS,
1230 }
1231 },
1232
1233 [FEAT_VMX_MISC] = {
1234 .type = MSR_FEATURE_WORD,
1235 .feat_names = {
1236 NULL, NULL, NULL, NULL,
1237 NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown",
1238 "vmx-activity-wait-sipi", NULL, NULL, NULL,
1239 NULL, NULL, NULL, NULL,
1240 NULL, NULL, NULL, NULL,
1241 NULL, NULL, NULL, NULL,
1242 NULL, NULL, NULL, NULL,
1243 NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL,
1244 },
1245 .msr = {
1246 .index = MSR_IA32_VMX_MISC,
1247 }
1248 },
1249
1250 [FEAT_VMX_EPT_VPID_CAPS] = {
1251 .type = MSR_FEATURE_WORD,
1252 .feat_names = {
1253 "vmx-ept-execonly", NULL, NULL, NULL,
1254 NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5",
1255 NULL, NULL, NULL, NULL,
1256 NULL, NULL, NULL, NULL,
1257 "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL,
1258 "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL,
1259 NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL,
1260 NULL, NULL, NULL, NULL,
1261 "vmx-invvpid", NULL, NULL, NULL,
1262 NULL, NULL, NULL, NULL,
1263 "vmx-invvpid-single-addr", "vmx-invept-single-context",
1264 "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals",
1265 NULL, NULL, NULL, NULL,
1266 NULL, NULL, NULL, NULL,
1267 NULL, NULL, NULL, NULL,
1268 NULL, NULL, NULL, NULL,
1269 NULL, NULL, NULL, NULL,
1270 },
1271 .msr = {
1272 .index = MSR_IA32_VMX_EPT_VPID_CAP,
1273 }
1274 },
1275
1276 [FEAT_VMX_BASIC] = {
1277 .type = MSR_FEATURE_WORD,
1278 .feat_names = {
1279 [54] = "vmx-ins-outs",
1280 [55] = "vmx-true-ctls",
1281 },
1282 .msr = {
1283 .index = MSR_IA32_VMX_BASIC,
1284 },
1285 /* Just to be safe - we don't support setting the MSEG version field. */
1286 .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR,
1287 },
1288
1289 [FEAT_VMX_VMFUNC] = {
1290 .type = MSR_FEATURE_WORD,
1291 .feat_names = {
1292 [0] = "vmx-eptp-switching",
1293 },
1294 .msr = {
1295 .index = MSR_IA32_VMX_VMFUNC,
1296 }
1297 },
1298
1299 };
1300
/* A set of bits within a single feature word. */
typedef struct FeatureMask {
    FeatureWord index;  /* which feature word the mask refers to */
    uint64_t mask;      /* bit mask within that feature word */
} FeatureMask;
1305
/*
 * A dependency between feature bits: the bits in .to are only
 * meaningful when the bits in .from are also present.
 */
typedef struct FeatureDep {
    FeatureMask from, to;
} FeatureDep;
1309
/*
 * Table of feature dependencies.  Each entry ties the .to bits to the
 * .from bits; a .to mask of ~0ull makes the entire target feature word
 * depend on the controlling bit (e.g. all VMX control words depend on
 * CPUID_EXT_VMX).
 */
static FeatureDep feature_dependencies[] = {
    {
        .from = { FEAT_7_0_EDX,             CPUID_7_0_EDX_ARCH_CAPABILITIES },
        .to = { FEAT_ARCH_CAPABILITIES,     ~0ull },
    },
    {
        .from = { FEAT_7_0_EDX,             CPUID_7_0_EDX_CORE_CAPABILITY },
        .to = { FEAT_CORE_CAPABILITY,       ~0ull },
    },
    {
        .from = { FEAT_1_ECX,               CPUID_EXT_VMX },
        .to = { FEAT_VMX_PROCBASED_CTLS,    ~0ull },
    },
    {
        .from = { FEAT_1_ECX,               CPUID_EXT_VMX },
        .to = { FEAT_VMX_PINBASED_CTLS,     ~0ull },
    },
    {
        .from = { FEAT_1_ECX,               CPUID_EXT_VMX },
        .to = { FEAT_VMX_EXIT_CTLS,         ~0ull },
    },
    {
        .from = { FEAT_1_ECX,               CPUID_EXT_VMX },
        .to = { FEAT_VMX_ENTRY_CTLS,        ~0ull },
    },
    {
        .from = { FEAT_1_ECX,               CPUID_EXT_VMX },
        .to = { FEAT_VMX_MISC,              ~0ull },
    },
    {
        .from = { FEAT_1_ECX,               CPUID_EXT_VMX },
        .to = { FEAT_VMX_BASIC,             ~0ull },
    },
    {
        .from = { FEAT_8000_0001_EDX,       CPUID_EXT2_LM },
        .to = { FEAT_VMX_ENTRY_CTLS,        VMX_VM_ENTRY_IA32E_MODE },
    },
    {
        .from = { FEAT_VMX_PROCBASED_CTLS,  VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS },
        .to = { FEAT_VMX_SECONDARY_CTLS,    ~0ull },
    },
    {
        .from = { FEAT_XSAVE,               CPUID_XSAVE_XSAVES },
        .to = { FEAT_VMX_SECONDARY_CTLS,    VMX_SECONDARY_EXEC_XSAVES },
    },
    {
        .from = { FEAT_1_ECX,               CPUID_EXT_RDRAND },
        .to = { FEAT_VMX_SECONDARY_CTLS,    VMX_SECONDARY_EXEC_RDRAND_EXITING },
    },
    {
        .from = { FEAT_7_0_EBX,             CPUID_7_0_EBX_INVPCID },
        .to = { FEAT_VMX_SECONDARY_CTLS,    VMX_SECONDARY_EXEC_ENABLE_INVPCID },
    },
    {
        .from = { FEAT_7_0_EBX,             CPUID_7_0_EBX_RDSEED },
        .to = { FEAT_VMX_SECONDARY_CTLS,    VMX_SECONDARY_EXEC_RDSEED_EXITING },
    },
    {
        .from = { FEAT_8000_0001_EDX,       CPUID_EXT2_RDTSCP },
        .to = { FEAT_VMX_SECONDARY_CTLS,    VMX_SECONDARY_EXEC_RDTSCP },
    },
    {
        /* EPT capabilities live in the low 32 bits of the EPT/VPID MSR */
        .from = { FEAT_VMX_SECONDARY_CTLS,  VMX_SECONDARY_EXEC_ENABLE_EPT },
        .to = { FEAT_VMX_EPT_VPID_CAPS,     0xffffffffull },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS,  VMX_SECONDARY_EXEC_ENABLE_EPT },
        .to = { FEAT_VMX_SECONDARY_CTLS,    VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST },
    },
    {
        /* VPID capabilities live in the high 32 bits of the EPT/VPID MSR */
        .from = { FEAT_VMX_SECONDARY_CTLS,  VMX_SECONDARY_EXEC_ENABLE_VPID },
        .to = { FEAT_VMX_EPT_VPID_CAPS,     0xffffffffull << 32 },
    },
    {
        .from = { FEAT_VMX_SECONDARY_CTLS,  VMX_SECONDARY_EXEC_ENABLE_VMFUNC },
        .to = { FEAT_VMX_VMFUNC,            ~0ull },
    },
};
1388
/* Mapping from a 32-bit x86 register index to its name and QAPI enum. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/* Expands to one table entry, e.g. REGISTER(EAX) -> [R_EAX] = { ... } */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
1409
/*
 * One XSAVE state component: the CPUID feature bit(s) that enable it,
 * plus its byte offset and size within the XSAVE area.
 */
typedef struct ExtSaveArea {
    uint32_t feature, bits;   /* FeatureWord index and required bit(s) */
    uint32_t offset, size;    /* location and length in the XSAVE area */
} ExtSaveArea;
1414
/*
 * XSAVE state components, indexed by XSTATE_*_BIT.  Offsets/sizes are
 * taken from the X86XSaveArea layout; the legacy FP/SSE components
 * have offset 0 because they live in the legacy region.
 */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
1459
1460 static uint32_t xsave_area_size(uint64_t mask)
1461 {
1462 int i;
1463 uint64_t ret = 0;
1464
1465 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1466 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1467 if ((mask >> i) & 1) {
1468 ret = MAX(ret, esa->offset + esa->size);
1469 }
1470 }
1471 return ret;
1472 }
1473
/* True when the accelerator (KVM or HVF) exposes the host's CPUID. */
static inline bool accel_uses_host_cpuid(void)
{
    if (kvm_enabled()) {
        return true;
    }
    return hvf_enabled();
}
1478
1479 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1480 {
1481 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1482 cpu->env.features[FEAT_XSAVE_COMP_LO];
1483 }
1484
1485 const char *get_register_name_32(unsigned int reg)
1486 {
1487 if (reg >= CPU_NB_REGS32) {
1488 return NULL;
1489 }
1490 return x86_reg_info_32[reg].name;
1491 }
1492
1493 /*
1494 * Returns the set of feature flags that are supported and migratable by
1495 * QEMU, for a given FeatureWord.
1496 */
1497 static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
1498 {
1499 FeatureWordInfo *wi = &feature_word_info[w];
1500 uint64_t r = 0;
1501 int i;
1502
1503 for (i = 0; i < 64; i++) {
1504 uint64_t f = 1ULL << i;
1505
1506 /* If the feature name is known, it is implicitly considered migratable,
1507 * unless it is explicitly set in unmigratable_flags */
1508 if ((wi->migratable_flags & f) ||
1509 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1510 r |= f;
1511 }
1512 }
1513 return r;
1514 }
1515
/*
 * Execute the CPUID instruction on the host for leaf @function and
 * sub-leaf @count, storing the resulting registers through whichever of
 * @eax/@ebx/@ecx/@edx are non-NULL.  Aborts on non-x86 hosts.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /*
     * 32-bit variant: save/restore all registers with PUSHA/POPA and
     * store the results through %2 (vec) instead of using register
     * outputs — NOTE(review): presumably to avoid declaring EBX as an
     * output, which can clash with its use as the PIC register; confirm.
     */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    /* Each output pointer is optional. */
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
1549
/*
 * Read the host CPU's vendor string and family/model/stepping via
 * CPUID.  @vendor is always filled; @family, @model and @stepping are
 * each optional and may be NULL.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    /* Leaf 0: vendor string is packed into EBX, EDX, ECX. */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    /* Leaf 1: family/model/stepping fields are packed into EAX. */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family != NULL) {
        /* base family plus extended family */
        *family = (int)(((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF));
    }
    if (model != NULL) {
        /* base model combined with extended model (bits 19:16 << 4) */
        *model = (int)(((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12));
    }
    if (stepping != NULL) {
        *stepping = (int)(eax & 0x0F);
    }
}
1568
1569 /* CPU class name definitions: */
1570
/*
 * Return the QOM type name for a given CPU model name.
 * Caller is responsible for freeing the returned string (g_free).
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
1578
1579 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1580 {
1581 g_autofree char *typename = x86_cpu_type_name(cpu_model);
1582 return object_class_by_name(typename);
1583 }
1584
1585 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1586 {
1587 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1588 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1589 return g_strndup(class_name,
1590 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1591 }
1592
/* A (property name, value) pair used to describe CPU model properties. */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1596
/* One versioned variant of a CPU model (see X86CPUDefinition::versions). */
typedef struct X86CPUVersionDefinition {
    X86CPUVersion version;  /* version number; 0 terminates the list */
    const char *alias;      /* optional alias name for this version */
    const char *note;       /* optional human-readable note */
    PropValue *props;       /* property overrides relative to the base model */
} X86CPUVersionDefinition;
1603
/* Base definition for a CPU model */
typedef struct X86CPUDefinition {
    const char *name;       /* CPU model name */
    uint32_t level;         /* maximum basic CPUID leaf */
    uint32_t xlevel;        /* maximum extended CPUID leaf (0x8000xxxx) */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;  /* feature bits, indexed by FeatureWord */
    const char *model_id;       /* CPUID model-id string */
    CPUCaches *cache_info;      /* optional cache topology description */

    /* Use AMD EPYC encoding for apic id */
    bool use_epyc_apic_id_encoding;

    /*
     * Definitions for alternative versions of CPU model.
     * List is terminated by item with version == 0.
     * If NULL, version 1 will be registered automatically.
     */
    const X86CPUVersionDefinition *versions;
} X86CPUDefinition;
1628
/* Reference to a specific CPU model version */
struct X86CPUModel {
    /* Base CPU definition */
    X86CPUDefinition *cpudef;
    /* CPU model version */
    X86CPUVersion version;
    /* Optional human-readable note about this model version */
    const char *note;
    /*
     * If true, this is an alias CPU model.
     * This matters only for "-cpu help" and query-cpu-definitions
     */
    bool is_alias;
};
1642
/*
 * Get full model name for CPU version, e.g. "Skylake-Client-v2".
 * Caller is responsible for freeing the returned string (g_free).
 */
static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef,
                                          X86CPUVersion version)
{
    /* version 0 is reserved as a list terminator and never named */
    assert(version > 0);
    return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
}
1650
1651 static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def)
1652 {
1653 /* When X86CPUDefinition::versions is NULL, we register only v1 */
1654 static const X86CPUVersionDefinition default_version_list[] = {
1655 { 1 },
1656 { /* end of list */ }
1657 };
1658
1659 return def->versions ?: default_version_list;
1660 }
1661
1662 bool cpu_x86_use_epyc_apic_id_encoding(const char *cpu_type)
1663 {
1664 X86CPUClass *xcc = X86_CPU_CLASS(object_class_by_name(cpu_type));
1665
1666 assert(xcc);
1667 if (xcc->model && xcc->model->cpudef) {
1668 return xcc->model->cpudef->use_epyc_apic_id_encoding;
1669 } else {
1670 return false;
1671 }
1672 }
1673
/* Cache topology advertised by the EPYC CPU model. */
static CPUCaches epyc_cache_info = {
    /* 32 KiB, 8-way L1 data cache */
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    /* 64 KiB, 4-way L1 instruction cache */
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    /* 512 KiB, 8-way unified L2 cache */
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    /* 8 MiB, 16-way unified L3 cache */
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 8192,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};
1723
/* Cache topology advertised by the EPYC-Rome CPU model. */
static CPUCaches epyc_rome_cache_info = {
    /* 32 KiB, 8-way L1 data cache */
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    /* 32 KiB, 8-way L1 instruction cache */
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    /* 512 KiB, 8-way unified L2 cache */
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    /* 16 MiB, 16-way unified L3 cache */
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 16 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 16384,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};
1773
1774 /* The following VMX features are not supported by KVM and are left out in the
1775 * CPU definitions:
1776 *
1777 * Dual-monitor support (all processors)
1778 * Entry to SMM
1779 * Deactivate dual-monitor treatment
1780 * Number of CR3-target values
1781 * Shutdown activity state
1782 * Wait-for-SIPI activity state
1783 * PAUSE-loop exiting (Westmere and newer)
1784 * EPT-violation #VE (Broadwell and newer)
1785 * Inject event with insn length=0 (Skylake and newer)
1786 * Conceal non-root operation from PT
1787 * Conceal VM exits from PT
1788 * Conceal VM entries from PT
1789 * Enable ENCLS exiting
1790 * Mode-based execute control (XS/XU)
 * TSC scaling (Skylake Server and newer)
1792 * GPA translation for PT (IceLake and newer)
1793 * User wait and pause
1794 * ENCLV exiting
1795 * Load IA32_RTIT_CTL
1796 * Clear IA32_RTIT_CTL
1797 * Advanced VM-exit information for EPT violations
1798 * Sub-page write permissions
1799 * PT in VMX operation
1800 */
1801
1802 static X86CPUDefinition builtin_x86_defs[] = {
1803 {
1804 .name = "qemu64",
1805 .level = 0xd,
1806 .vendor = CPUID_VENDOR_AMD,
1807 .family = 6,
1808 .model = 6,
1809 .stepping = 3,
1810 .features[FEAT_1_EDX] =
1811 PPRO_FEATURES |
1812 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1813 CPUID_PSE36,
1814 .features[FEAT_1_ECX] =
1815 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1816 .features[FEAT_8000_0001_EDX] =
1817 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1818 .features[FEAT_8000_0001_ECX] =
1819 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1820 .xlevel = 0x8000000A,
1821 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1822 },
1823 {
1824 .name = "phenom",
1825 .level = 5,
1826 .vendor = CPUID_VENDOR_AMD,
1827 .family = 16,
1828 .model = 2,
1829 .stepping = 3,
1830 /* Missing: CPUID_HT */
1831 .features[FEAT_1_EDX] =
1832 PPRO_FEATURES |
1833 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1834 CPUID_PSE36 | CPUID_VME,
1835 .features[FEAT_1_ECX] =
1836 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1837 CPUID_EXT_POPCNT,
1838 .features[FEAT_8000_0001_EDX] =
1839 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1840 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1841 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1842 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1843 CPUID_EXT3_CR8LEG,
1844 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1845 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1846 .features[FEAT_8000_0001_ECX] =
1847 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1848 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1849 /* Missing: CPUID_SVM_LBRV */
1850 .features[FEAT_SVM] =
1851 CPUID_SVM_NPT,
1852 .xlevel = 0x8000001A,
1853 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1854 },
1855 {
1856 .name = "core2duo",
1857 .level = 10,
1858 .vendor = CPUID_VENDOR_INTEL,
1859 .family = 6,
1860 .model = 15,
1861 .stepping = 11,
1862 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1863 .features[FEAT_1_EDX] =
1864 PPRO_FEATURES |
1865 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1866 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1867 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1868 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1869 .features[FEAT_1_ECX] =
1870 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1871 CPUID_EXT_CX16,
1872 .features[FEAT_8000_0001_EDX] =
1873 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1874 .features[FEAT_8000_0001_ECX] =
1875 CPUID_EXT3_LAHF_LM,
1876 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
1877 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1878 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1879 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1880 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1881 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
1882 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1883 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1884 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
1885 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
1886 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
1887 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
1888 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
1889 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
1890 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
1891 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
1892 .features[FEAT_VMX_SECONDARY_CTLS] =
1893 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
1894 .xlevel = 0x80000008,
1895 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1896 },
/*
 * "kvm64": generic 64-bit CPU model for KVM guests.  Long mode, NX and
 * SYSCALL on top of a conservative 32-bit feature set; the "Missing:"
 * comments list bits real CPUs of this class have but that are
 * deliberately left out.  VMX controls modeled on Cedar Mill/Prescott.
 */
1897 {
1898 .name = "kvm64",
1899 .level = 0xd,
1900 .vendor = CPUID_VENDOR_INTEL,
1901 .family = 15,
1902 .model = 6,
1903 .stepping = 1,
1904 /* Missing: CPUID_HT */
1905 .features[FEAT_1_EDX] =
1906 PPRO_FEATURES | CPUID_VME |
1907 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1908 CPUID_PSE36,
1909 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1910 .features[FEAT_1_ECX] =
1911 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1912 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1913 .features[FEAT_8000_0001_EDX] =
1914 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1915 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1916 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1917 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1918 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1919 .features[FEAT_8000_0001_ECX] =
1920 0,
1921 /* VMX features from Cedar Mill/Prescott */
1922 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1923 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1924 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1925 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1926 VMX_PIN_BASED_NMI_EXITING,
1927 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1928 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1929 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
1930 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
1931 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
1932 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
1933 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
1934 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING,
1935 .xlevel = 0x80000008,
1936 .model_id = "Common KVM processor"
1937 },
/*
 * "qemu32": minimal 32-bit virtual CPU (PPRO-class features plus SSE3).
 * No extended-CPUID feature leaves beyond the model-id string (xlevel
 * 0x80000004).
 */
1938 {
1939 .name = "qemu32",
1940 .level = 4,
1941 .vendor = CPUID_VENDOR_INTEL,
1942 .family = 6,
1943 .model = 6,
1944 .stepping = 3,
1945 .features[FEAT_1_EDX] =
1946 PPRO_FEATURES,
1947 .features[FEAT_1_ECX] =
1948 CPUID_EXT_SSE3,
1949 .xlevel = 0x80000004,
1950 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1951 },
/*
 * "kvm32": generic 32-bit CPU model for KVM guests (family 15, no long
 * mode in FEAT_8000_0001_EDX).  VMX controls modeled on Yonah.
 */
1952 {
1953 .name = "kvm32",
1954 .level = 5,
1955 .vendor = CPUID_VENDOR_INTEL,
1956 .family = 15,
1957 .model = 6,
1958 .stepping = 1,
1959 .features[FEAT_1_EDX] =
1960 PPRO_FEATURES | CPUID_VME |
1961 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1962 .features[FEAT_1_ECX] =
1963 CPUID_EXT_SSE3,
1964 .features[FEAT_8000_0001_ECX] =
1965 0,
1966 /* VMX features from Yonah */
1967 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1968 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1969 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1970 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1971 VMX_PIN_BASED_NMI_EXITING,
1972 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1973 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1974 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
1975 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
1976 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
1977 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
1978 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
1979 .xlevel = 0x80000008,
1980 .model_id = "Common 32-bit KVM processor"
1981 },
/*
 * "coreduo": Intel Core Duo-style 32-bit model (family 6 model 14);
 * 64-bit long mode is not advertised, only NX in FEAT_8000_0001_EDX.
 */
1982 {
1983 .name = "coreduo",
1984 .level = 10,
1985 .vendor = CPUID_VENDOR_INTEL,
1986 .family = 6,
1987 .model = 14,
1988 .stepping = 8,
1989 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1990 .features[FEAT_1_EDX] =
1991 PPRO_FEATURES | CPUID_VME |
1992 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1993 CPUID_SS,
1994 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1995 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1996 .features[FEAT_1_ECX] =
1997 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1998 .features[FEAT_8000_0001_EDX] =
1999 CPUID_EXT2_NX,
2000 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
2001 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
2002 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2003 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2004 VMX_PIN_BASED_NMI_EXITING,
2005 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2006 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2007 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2008 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2009 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
2010 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
2011 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
2012 .xlevel = 0x80000008,
2013 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
2014 },
/* "486": i486-class legacy model; no extended CPUID leaves (xlevel 0). */
2015 {
2016 .name = "486",
2017 .level = 1,
2018 .vendor = CPUID_VENDOR_INTEL,
2019 .family = 4,
2020 .model = 8,
2021 .stepping = 0,
2022 .features[FEAT_1_EDX] =
2023 I486_FEATURES,
2024 .xlevel = 0,
2025 .model_id = "",
2026 },
/* "pentium": P5-class legacy model; no extended CPUID leaves. */
2027 {
2028 .name = "pentium",
2029 .level = 1,
2030 .vendor = CPUID_VENDOR_INTEL,
2031 .family = 5,
2032 .model = 4,
2033 .stepping = 3,
2034 .features[FEAT_1_EDX] =
2035 PENTIUM_FEATURES,
2036 .xlevel = 0,
2037 .model_id = "",
2038 },
/* "pentium2": Pentium II-class legacy model; no extended CPUID leaves. */
2039 {
2040 .name = "pentium2",
2041 .level = 2,
2042 .vendor = CPUID_VENDOR_INTEL,
2043 .family = 6,
2044 .model = 5,
2045 .stepping = 2,
2046 .features[FEAT_1_EDX] =
2047 PENTIUM2_FEATURES,
2048 .xlevel = 0,
2049 .model_id = "",
2050 },
/* "pentium3": Pentium III-class legacy model; no extended CPUID leaves. */
2051 {
2052 .name = "pentium3",
2053 .level = 3,
2054 .vendor = CPUID_VENDOR_INTEL,
2055 .family = 6,
2056 .model = 7,
2057 .stepping = 3,
2058 .features[FEAT_1_EDX] =
2059 PENTIUM3_FEATURES,
2060 .xlevel = 0,
2061 .model_id = "",
2062 },
/*
 * "athlon": AMD Athlon-style 32-bit model; only AMD-vendor entry in this
 * part of the table (advertises MMXEXT/3DNow! extensions).
 */
2063 {
2064 .name = "athlon",
2065 .level = 2,
2066 .vendor = CPUID_VENDOR_AMD,
2067 .family = 6,
2068 .model = 2,
2069 .stepping = 3,
2070 .features[FEAT_1_EDX] =
2071 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
2072 CPUID_MCA,
2073 .features[FEAT_8000_0001_EDX] =
2074 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
2075 .xlevel = 0x80000008,
2076 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
2077 },
/*
 * "n270": Intel Atom N270-style model (family 6 model 28).  32-bit
 * (no CPUID_EXT2_LM advertised) with SSSE3/MOVBE and LAHF_LM.
 */
2078 {
2079 .name = "n270",
2080 .level = 10,
2081 .vendor = CPUID_VENDOR_INTEL,
2082 .family = 6,
2083 .model = 28,
2084 .stepping = 2,
2085 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2086 .features[FEAT_1_EDX] =
2087 PPRO_FEATURES |
2088 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
2089 CPUID_ACPI | CPUID_SS,
2090 /* Some CPUs got no CPUID_SEP */
2091 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
2092 * CPUID_EXT_XTPR */
2093 .features[FEAT_1_ECX] =
2094 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
2095 CPUID_EXT_MOVBE,
2096 .features[FEAT_8000_0001_EDX] =
2097 CPUID_EXT2_NX,
2098 .features[FEAT_8000_0001_ECX] =
2099 CPUID_EXT3_LAHF_LM,
2100 .xlevel = 0x80000008,
2101 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
2102 },
/*
 * "Conroe": Conroe/Merom-class Core 2 model.  First entry in this part
 * of the table with an explicit FEAT_VMX_BASIC value and secondary VMX
 * execution controls.
 */
2103 {
2104 .name = "Conroe",
2105 .level = 10,
2106 .vendor = CPUID_VENDOR_INTEL,
2107 .family = 6,
2108 .model = 15,
2109 .stepping = 3,
2110 .features[FEAT_1_EDX] =
2111 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2112 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2113 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2114 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2115 CPUID_DE | CPUID_FP87,
2116 .features[FEAT_1_ECX] =
2117 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2118 .features[FEAT_8000_0001_EDX] =
2119 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2120 .features[FEAT_8000_0001_ECX] =
2121 CPUID_EXT3_LAHF_LM,
2122 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
2123 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
2124 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
2125 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2126 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2127 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
2128 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2129 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2130 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2131 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2132 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2133 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2134 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2135 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2136 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2137 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2138 .features[FEAT_VMX_SECONDARY_CTLS] =
2139 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
2140 .xlevel = 0x80000008,
2141 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
2142 },
/*
 * "Penryn": Penryn-class Core 2 model.  Extends Conroe with SSE4.1 and
 * CX16, adds PERF_GLOBAL_CTRL load controls on VM entry/exit and WBINVD
 * exiting among the secondary VMX controls.
 */
2143 {
2144 .name = "Penryn",
2145 .level = 10,
2146 .vendor = CPUID_VENDOR_INTEL,
2147 .family = 6,
2148 .model = 23,
2149 .stepping = 3,
2150 .features[FEAT_1_EDX] =
2151 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2152 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2153 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2154 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2155 CPUID_DE | CPUID_FP87,
2156 .features[FEAT_1_ECX] =
2157 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2158 CPUID_EXT_SSE3,
2159 .features[FEAT_8000_0001_EDX] =
2160 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2161 .features[FEAT_8000_0001_ECX] =
2162 CPUID_EXT3_LAHF_LM,
2163 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
2164 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2165 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2166 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT |
2167 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
2168 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2169 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2170 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
2171 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2172 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2173 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2174 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2175 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2176 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2177 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2178 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2179 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2180 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2181 .features[FEAT_VMX_SECONDARY_CTLS] =
2182 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2183 VMX_SECONDARY_EXEC_WBINVD_EXITING,
2184 .xlevel = 0x80000008,
2185 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
2186 },
/*
 * "Nehalem": Core i7 (Nehalem)-class model.  Adds SSE4.2/POPCNT and the
 * full EPT/VPID capability set (FEAT_VMX_EPT_VPID_CAPS) plus "true"
 * VMX control MSRs.  Versioned: -v2 ("Nehalem-IBRS") turns on spec-ctrl.
 */
2187 {
2188 .name = "Nehalem",
2189 .level = 11,
2190 .vendor = CPUID_VENDOR_INTEL,
2191 .family = 6,
2192 .model = 26,
2193 .stepping = 3,
2194 .features[FEAT_1_EDX] =
2195 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2196 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2197 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2198 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2199 CPUID_DE | CPUID_FP87,
2200 .features[FEAT_1_ECX] =
2201 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2202 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2203 .features[FEAT_8000_0001_EDX] =
2204 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2205 .features[FEAT_8000_0001_ECX] =
2206 CPUID_EXT3_LAHF_LM,
2207 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2208 MSR_VMX_BASIC_TRUE_CTLS,
2209 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2210 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2211 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2212 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2213 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2214 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2215 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2216 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2217 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2218 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2219 .features[FEAT_VMX_EXIT_CTLS] =
2220 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2221 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2222 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2223 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2224 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2225 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2226 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2227 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2228 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2229 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2230 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2231 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2232 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2233 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2234 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2235 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2236 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2237 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2238 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2239 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2240 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2241 .features[FEAT_VMX_SECONDARY_CTLS] =
2242 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2243 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2244 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2245 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2246 VMX_SECONDARY_EXEC_ENABLE_VPID,
2247 .xlevel = 0x80000008,
2248 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
/* Versioned model definitions; v2 enables IBRS via the spec-ctrl property. */
2249 .versions = (X86CPUVersionDefinition[]) {
2250 { .version = 1 },
2251 {
2252 .version = 2,
2253 .alias = "Nehalem-IBRS",
2254 .props = (PropValue[]) {
2255 { "spec-ctrl", "on" },
2256 { "model-id",
2257 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" },
2258 { /* end of list */ }
2259 }
2260 },
2261 { /* end of list */ }
2262 }
2263 },
/*
 * "Westmere": Westmere-class model.  Adds AES/PCLMULQDQ and ARAT over
 * Nehalem, plus MSR_VMX_MISC_STORE_LMA and unrestricted-guest among the
 * VMX controls.  Versioned: -v2 ("Westmere-IBRS") turns on spec-ctrl.
 */
2264 {
2265 .name = "Westmere",
2266 .level = 11,
2267 .vendor = CPUID_VENDOR_INTEL,
2268 .family = 6,
2269 .model = 44,
2270 .stepping = 1,
2271 .features[FEAT_1_EDX] =
2272 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2273 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2274 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2275 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2276 CPUID_DE | CPUID_FP87,
2277 .features[FEAT_1_ECX] =
2278 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2279 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2280 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2281 .features[FEAT_8000_0001_EDX] =
2282 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2283 .features[FEAT_8000_0001_ECX] =
2284 CPUID_EXT3_LAHF_LM,
2285 .features[FEAT_6_EAX] =
2286 CPUID_6_EAX_ARAT,
2287 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2288 MSR_VMX_BASIC_TRUE_CTLS,
2289 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2290 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2291 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2292 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2293 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2294 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2295 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2296 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2297 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2298 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2299 .features[FEAT_VMX_EXIT_CTLS] =
2300 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2301 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2302 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2303 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2304 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2305 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2306 MSR_VMX_MISC_STORE_LMA,
2307 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2308 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2309 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2310 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2311 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2312 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2313 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2314 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2315 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2316 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2317 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2318 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2319 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2320 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2321 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2322 .features[FEAT_VMX_SECONDARY_CTLS] =
2323 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2324 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2325 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2326 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2327 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
2328 .xlevel = 0x80000008,
2329 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
/* Versioned model definitions; v2 enables IBRS via the spec-ctrl property. */
2330 .versions = (X86CPUVersionDefinition[]) {
2331 { .version = 1 },
2332 {
2333 .version = 2,
2334 .alias = "Westmere-IBRS",
2335 .props = (PropValue[]) {
2336 { "spec-ctrl", "on" },
2337 { "model-id",
2338 "Westmere E56xx/L56xx/X56xx (IBRS update)" },
2339 { /* end of list */ }
2340 }
2341 },
2342 { /* end of list */ }
2343 }
2344 },
/*
 * "SandyBridge": Sandy Bridge-class model.  Adds AVX/XSAVE (with
 * XSAVEOPT), x2APIC, TSC-deadline timer and RDTSCP over Westmere.
 * Versioned: -v2 ("SandyBridge-IBRS") turns on spec-ctrl.
 */
2345 {
2346 .name = "SandyBridge",
2347 .level = 0xd,
2348 .vendor = CPUID_VENDOR_INTEL,
2349 .family = 6,
2350 .model = 42,
2351 .stepping = 1,
2352 .features[FEAT_1_EDX] =
2353 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2354 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2355 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2356 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2357 CPUID_DE | CPUID_FP87,
2358 .features[FEAT_1_ECX] =
2359 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2360 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2361 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2362 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2363 CPUID_EXT_SSE3,
2364 .features[FEAT_8000_0001_EDX] =
2365 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2366 CPUID_EXT2_SYSCALL,
2367 .features[FEAT_8000_0001_ECX] =
2368 CPUID_EXT3_LAHF_LM,
2369 .features[FEAT_XSAVE] =
2370 CPUID_XSAVE_XSAVEOPT,
2371 .features[FEAT_6_EAX] =
2372 CPUID_6_EAX_ARAT,
2373 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2374 MSR_VMX_BASIC_TRUE_CTLS,
2375 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2376 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2377 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2378 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2379 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2380 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2381 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2382 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2383 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2384 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2385 .features[FEAT_VMX_EXIT_CTLS] =
2386 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2387 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2388 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2389 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2390 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2391 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2392 MSR_VMX_MISC_STORE_LMA,
2393 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2394 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2395 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2396 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2397 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2398 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2399 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2400 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2401 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2402 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2403 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2404 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2405 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2406 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2407 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2408 .features[FEAT_VMX_SECONDARY_CTLS] =
2409 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2410 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2411 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2412 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2413 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
2414 .xlevel = 0x80000008,
2415 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
/* Versioned model definitions; v2 enables IBRS via the spec-ctrl property. */
2416 .versions = (X86CPUVersionDefinition[]) {
2417 { .version = 1 },
2418 {
2419 .version = 2,
2420 .alias = "SandyBridge-IBRS",
2421 .props = (PropValue[]) {
2422 { "spec-ctrl", "on" },
2423 { "model-id",
2424 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" },
2425 { /* end of list */ }
2426 }
2427 },
2428 { /* end of list */ }
2429 }
2430 },
/*
 * "IvyBridge": Ivy Bridge-class model.  Adds F16C/RDRAND plus CPUID
 * leaf 7 bits (FSGSBASE/SMEP/ERMS) over SandyBridge; VMX gains posted
 * interrupts, APIC-register virtualization, virtual interrupt delivery
 * and RDRAND exiting.  Versioned: -v2 ("IvyBridge-IBRS").
 */
2431 {
2432 .name = "IvyBridge",
2433 .level = 0xd,
2434 .vendor = CPUID_VENDOR_INTEL,
2435 .family = 6,
2436 .model = 58,
2437 .stepping = 9,
2438 .features[FEAT_1_EDX] =
2439 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2440 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2441 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2442 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2443 CPUID_DE | CPUID_FP87,
2444 .features[FEAT_1_ECX] =
2445 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2446 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2447 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2448 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2449 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2450 .features[FEAT_7_0_EBX] =
2451 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
2452 CPUID_7_0_EBX_ERMS,
2453 .features[FEAT_8000_0001_EDX] =
2454 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2455 CPUID_EXT2_SYSCALL,
2456 .features[FEAT_8000_0001_ECX] =
2457 CPUID_EXT3_LAHF_LM,
2458 .features[FEAT_XSAVE] =
2459 CPUID_XSAVE_XSAVEOPT,
2460 .features[FEAT_6_EAX] =
2461 CPUID_6_EAX_ARAT,
2462 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2463 MSR_VMX_BASIC_TRUE_CTLS,
2464 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2465 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2466 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2467 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2468 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2469 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2470 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2471 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2472 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2473 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2474 .features[FEAT_VMX_EXIT_CTLS] =
2475 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2476 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2477 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2478 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2479 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2480 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2481 MSR_VMX_MISC_STORE_LMA,
2482 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2483 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2484 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2485 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2486 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2487 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2488 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2489 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2490 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2491 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2492 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2493 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2494 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2495 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2496 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2497 .features[FEAT_VMX_SECONDARY_CTLS] =
2498 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2499 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2500 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2501 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2502 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2503 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2504 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2505 VMX_SECONDARY_EXEC_RDRAND_EXITING,
2506 .xlevel = 0x80000008,
2507 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
/* Versioned model definitions; v2 enables IBRS via the spec-ctrl property. */
2508 .versions = (X86CPUVersionDefinition[]) {
2509 { .version = 1 },
2510 {
2511 .version = 2,
2512 .alias = "IvyBridge-IBRS",
2513 .props = (PropValue[]) {
2514 { "spec-ctrl", "on" },
2515 { "model-id",
2516 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" },
2517 { /* end of list */ }
2518 }
2519 },
2520 { /* end of list */ }
2521 }
2522 },
/*
 * "Haswell": Haswell-class model.  Adds AVX2/BMI/FMA/MOVBE/PCID and TSX
 * (HLE/RTM) over IvyBridge; VMX gains EPT A/D bits, INVPCID enable,
 * VMFUNC (EPT switching) and shadow VMCS.  Four versions: the -noTSX
 * variants clear hle/rtm (and bump stepping to 1), the -IBRS variants
 * enable spec-ctrl.
 */
2523 {
2524 .name = "Haswell",
2525 .level = 0xd,
2526 .vendor = CPUID_VENDOR_INTEL,
2527 .family = 6,
2528 .model = 60,
2529 .stepping = 4,
2530 .features[FEAT_1_EDX] =
2531 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2532 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2533 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2534 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2535 CPUID_DE | CPUID_FP87,
2536 .features[FEAT_1_ECX] =
2537 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2538 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2539 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2540 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2541 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2542 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2543 .features[FEAT_8000_0001_EDX] =
2544 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2545 CPUID_EXT2_SYSCALL,
2546 .features[FEAT_8000_0001_ECX] =
2547 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2548 .features[FEAT_7_0_EBX] =
2549 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2550 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2551 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2552 CPUID_7_0_EBX_RTM,
2553 .features[FEAT_XSAVE] =
2554 CPUID_XSAVE_XSAVEOPT,
2555 .features[FEAT_6_EAX] =
2556 CPUID_6_EAX_ARAT,
2557 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2558 MSR_VMX_BASIC_TRUE_CTLS,
2559 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2560 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2561 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2562 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2563 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2564 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2565 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2566 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2567 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2568 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2569 .features[FEAT_VMX_EXIT_CTLS] =
2570 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2571 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2572 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2573 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2574 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2575 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2576 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2577 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2578 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2579 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2580 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2581 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2582 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2583 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2584 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2585 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2586 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2587 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2588 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2589 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2590 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2591 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2592 .features[FEAT_VMX_SECONDARY_CTLS] =
2593 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2594 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2595 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2596 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2597 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2598 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2599 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2600 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2601 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
2602 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2603 .xlevel = 0x80000008,
2604 .model_id = "Intel Core Processor (Haswell)",
/* Versioned definitions; each version's props are applied on top of the
 * previous version's (v3 must re-enable the TSX bits v2 cleared). */
2605 .versions = (X86CPUVersionDefinition[]) {
2606 { .version = 1 },
2607 {
2608 .version = 2,
2609 .alias = "Haswell-noTSX",
2610 .props = (PropValue[]) {
2611 { "hle", "off" },
2612 { "rtm", "off" },
2613 { "stepping", "1" },
2614 { "model-id", "Intel Core Processor (Haswell, no TSX)", },
2615 { /* end of list */ }
2616 },
2617 },
2618 {
2619 .version = 3,
2620 .alias = "Haswell-IBRS",
2621 .props = (PropValue[]) {
2622 /* Restore TSX features removed by -v2 above */
2623 { "hle", "on" },
2624 { "rtm", "on" },
2625 /*
2626 * Haswell and Haswell-IBRS had stepping=4 in
2627 * QEMU 4.0 and older
2628 */
2629 { "stepping", "4" },
2630 { "spec-ctrl", "on" },
2631 { "model-id",
2632 "Intel Core Processor (Haswell, IBRS)" },
2633 { /* end of list */ }
2634 }
2635 },
2636 {
2637 .version = 4,
2638 .alias = "Haswell-noTSX-IBRS",
2639 .props = (PropValue[]) {
2640 { "hle", "off" },
2641 { "rtm", "off" },
2642 /* spec-ctrl was already enabled by -v3 above */
2643 { "stepping", "1" },
2644 { "model-id",
2645 "Intel Core Processor (Haswell, no TSX, IBRS)" },
2646 { /* end of list */ }
2647 }
2648 },
2649 { /* end of list */ }
2650 }
2651 },
    {
        /*
         * Broadwell (family 6, model 61): Haswell successor.  The
         * versioned definitions below only toggle TSX (hle/rtm) and
         * IBRS (spec-ctrl) and adjust model-id accordingly; all other
         * feature bits are shared with version 1.
         */
        .name = "Broadwell",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        /* Nested-VMX capability MSR bits advertised for this model */
        .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
            MSR_VMX_BASIC_TRUE_CTLS,
        .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
            VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
            VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
        .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
            MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
            MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
            MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
            MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
            MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
            MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
        .features[FEAT_VMX_EXIT_CTLS] =
            VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
            VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
            VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
            VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
            VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
        .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
            MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
        .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
            VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
            VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
        .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
            VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
            VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
            VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
            VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
            VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
            VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
            VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
            VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
            VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
            VMX_CPU_BASED_MONITOR_TRAP_FLAG |
            VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
        .features[FEAT_VMX_SECONDARY_CTLS] =
            VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
            VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
            VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
            VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
            VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
            VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
            VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
            VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
            VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
            VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
        .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell)",
        .versions = (X86CPUVersionDefinition[]) {
            { .version = 1 },
            {
                .version = 2,
                .alias = "Broadwell-noTSX",
                .props = (PropValue[]) {
                    { "hle", "off" },
                    { "rtm", "off" },
                    { "model-id", "Intel Core Processor (Broadwell, no TSX)", },
                    { /* end of list */ }
                },
            },
            {
                .version = 3,
                .alias = "Broadwell-IBRS",
                .props = (PropValue[]) {
                    /* Restore TSX features removed by -v2 above */
                    { "hle", "on" },
                    { "rtm", "on" },
                    { "spec-ctrl", "on" },
                    { "model-id",
                      "Intel Core Processor (Broadwell, IBRS)" },
                    { /* end of list */ }
                }
            },
            {
                .version = 4,
                .alias = "Broadwell-noTSX-IBRS",
                .props = (PropValue[]) {
                    { "hle", "off" },
                    { "rtm", "off" },
                    /* spec-ctrl was already enabled by -v3 above */
                    { "model-id",
                      "Intel Core Processor (Broadwell, no TSX, IBRS)" },
                    { /* end of list */ }
                }
            },
            { /* end of list */ }
        }
    },
    {
        /*
         * Skylake-Client (family 6, model 94): 6th-gen Core desktop/
         * mobile part.  Unlike Skylake-Server it has no AVX-512, no
         * PDPE1GB and no VMX posted interrupts / APIC virtualization.
         * Versions below add IBRS (v2) and additionally drop TSX (v3).
         */
        .name = "Skylake-Client",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 94,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        /*
         * Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        /*
         * Missing: Mode-based execute control (XS/XU), processor
         * tracing, TSC scaling
         */
        .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
            MSR_VMX_BASIC_TRUE_CTLS,
        .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
            VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
            VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
        .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
            MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
            MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
            MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
            MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
            MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
            MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
        .features[FEAT_VMX_EXIT_CTLS] =
            VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
            VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
            VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
            VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
            VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
        .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
            MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
        /* Client part: no VMX_PIN_BASED_POSTED_INTR here */
        .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
            VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
            VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
        .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
            VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
            VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
            VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
            VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
            VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
            VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
            VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
            VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
            VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
            VMX_CPU_BASED_MONITOR_TRAP_FLAG |
            VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
        .features[FEAT_VMX_SECONDARY_CTLS] =
            VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
            VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
            VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
            VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
            VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
            VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
            VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
        .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Skylake)",
        .versions = (X86CPUVersionDefinition[]) {
            { .version = 1 },
            {
                .version = 2,
                .alias = "Skylake-Client-IBRS",
                .props = (PropValue[]) {
                    { "spec-ctrl", "on" },
                    { "model-id",
                      "Intel Core Processor (Skylake, IBRS)" },
                    { /* end of list */ }
                }
            },
            {
                .version = 3,
                .alias = "Skylake-Client-noTSX-IBRS",
                .props = (PropValue[]) {
                    { "hle", "off" },
                    { "rtm", "off" },
                    { "model-id",
                      "Intel Core Processor (Skylake, IBRS, no TSX)" },
                    { /* end of list */ }
                }
            },
            { /* end of list */ }
        }
    },
    {
        /*
         * Skylake-Server (family 6, model 85, stepping 4): Skylake-SP
         * Xeon.  Adds over Skylake-Client: PDPE1GB, CLWB/CLFLUSHOPT,
         * the AVX-512 F/DQ/BW/CD/VL group, PKU, and the server VMX
         * features (posted interrupts, x2APIC/APIC virtualization).
         * Versions below add IBRS (v2) and additionally drop TSX (v3).
         */
        .name = "Skylake-Server",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 85,
        .stepping = 4,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_PKU,
        /*
         * Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        /*
         * Missing: Mode-based execute control (XS/XU), processor
         * tracing, TSC scaling
         */
        .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
            MSR_VMX_BASIC_TRUE_CTLS,
        .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
            VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
            VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
        .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
            MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
            MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
            MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
            MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
            MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
            MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
        .features[FEAT_VMX_EXIT_CTLS] =
            VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
            VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
            VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
            VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
            VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
        .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
            MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
        .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
            VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
            VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
        .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
            VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
            VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
            VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
            VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
            VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
            VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
            VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
            VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
            VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
            VMX_CPU_BASED_MONITOR_TRAP_FLAG |
            VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
        .features[FEAT_VMX_SECONDARY_CTLS] =
            VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
            VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
            VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
            VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
            VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
            VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
            VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
            VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
            VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
            VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Skylake)",
        .versions = (X86CPUVersionDefinition[]) {
            { .version = 1 },
            {
                .version = 2,
                .alias = "Skylake-Server-IBRS",
                .props = (PropValue[]) {
                    /* clflushopt was not added to Skylake-Server-IBRS */
                    /* TODO: add -v3 including clflushopt */
                    { "clflushopt", "off" },
                    { "spec-ctrl", "on" },
                    { "model-id",
                      "Intel Xeon Processor (Skylake, IBRS)" },
                    { /* end of list */ }
                }
            },
            {
                .version = 3,
                .alias = "Skylake-Server-noTSX-IBRS",
                .props = (PropValue[]) {
                    { "hle", "off" },
                    { "rtm", "off" },
                    { "model-id",
                      "Intel Xeon Processor (Skylake, IBRS, no TSX)" },
                    { /* end of list */ }
                }
            },
            { /* end of list */ }
        }
    },
    {
        /*
         * Cascadelake-Server (family 6, model 85, stepping 6): second-
         * generation Xeon Scalable.  Adds AVX512VNNI and the SPEC_CTRL/
         * SSBD speculation controls over Skylake-Server.  Version 2
         * turns on IA32_ARCH_CAPABILITIES and its hardware-mitigation
         * bits via device properties; v3 additionally drops TSX.
         */
        .name = "Cascadelake-Server",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 85,
        .stepping = 6,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_PKU |
            CPUID_7_0_ECX_AVX512VNNI,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
        /*
         * Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        /*
         * Missing: Mode-based execute control (XS/XU), processor
         * tracing, TSC scaling
         */
        .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
            MSR_VMX_BASIC_TRUE_CTLS,
        .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
            VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
            VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
        .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
            MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
            MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
            MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
            MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
            MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
            MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
        .features[FEAT_VMX_EXIT_CTLS] =
            VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
            VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
            VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
            VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
            VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
        .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
            MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
        .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
            VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
            VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
        .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
            VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
            VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
            VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
            VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
            VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
            VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
            VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
            VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
            VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
            VMX_CPU_BASED_MONITOR_TRAP_FLAG |
            VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
        .features[FEAT_VMX_SECONDARY_CTLS] =
            VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
            VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
            VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
            VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
            VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
            VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
            VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
            VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
            VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
            VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Cascadelake)",
        .versions = (X86CPUVersionDefinition[]) {
            { .version = 1 },
            { .version = 2,
              /* v2: expose ARCH_CAPABILITIES hardware-mitigation bits */
              .props = (PropValue[]) {
                  { "arch-capabilities", "on" },
                  { "rdctl-no", "on" },
                  { "ibrs-all", "on" },
                  { "skip-l1dfl-vmentry", "on" },
                  { "mds-no", "on" },
                  { /* end of list */ }
              },
            },
            { .version = 3,
              .alias = "Cascadelake-Server-noTSX",
              .props = (PropValue[]) {
                  { "hle", "off" },
                  { "rtm", "off" },
                  { /* end of list */ }
              },
            },
            { /* end of list */ }
        }
    },
    {
        /*
         * Cooperlake (family 6, model 85, stepping 10): third-gen Xeon
         * Scalable.  Relative to Cascadelake it adds STIBP, a built-in
         * IA32_ARCH_CAPABILITIES (RDCL_NO/IBRS_ALL/SKIP_L1DFL/MDS_NO/
         * PSCHANGE_MC_NO/TAA_NO) and AVX512_BF16.  No versioned
         * variants are defined for this model.
         */
        .name = "Cooperlake",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 85,
        .stepping = 10,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_PKU |
            CPUID_7_0_ECX_AVX512VNNI,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP |
            CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES,
        .features[FEAT_ARCH_CAPABILITIES] =
            MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL |
            MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO |
            MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO,
        .features[FEAT_7_1_EAX] =
            CPUID_7_1_EAX_AVX512_BF16,
        /*
         * Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        /*
         * Missing: Mode-based execute control (XS/XU), processor
         * tracing, TSC scaling
         */
        .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
            MSR_VMX_BASIC_TRUE_CTLS,
        .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
            VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
            VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
        .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
            MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
            MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
            MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
            MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
            MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
            MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
        .features[FEAT_VMX_EXIT_CTLS] =
            VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
            VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
            VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
            VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
            VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
        .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
            MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
        .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
            VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
            VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
        .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
            VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
            VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
            VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
            VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
            VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
            VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
            VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
            VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
            VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
            VMX_CPU_BASED_MONITOR_TRAP_FLAG |
            VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
        .features[FEAT_VMX_SECONDARY_CTLS] =
            VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
            VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
            VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
            VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
            VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
            VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
            VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
            VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
            VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
            VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
        .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Cooperlake)",
    },
    {
        /*
         * Icelake-Client (family 6, model 126): 10th-gen Core mobile
         * part.  Adds WBNOINVD and the VBMI/GFNI/VAES/VPCLMULQDQ/
         * VNNI/BITALG/VPOPCNTDQ group over Skylake-Client.  Version 2
         * removes TSX (hle/rtm).
         *
         * NOTE(review): FEAT_7_0_ECX advertises AVX-512 sub-features
         * (VBMI, VNNI, ...) while FEAT_7_0_EBX has no AVX512F/VL --
         * confirm against real Icelake client CPUID before relying on
         * this model for AVX-512 guests.
         */
        .name = "Icelake-Client",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 126,
        .stepping = 0,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_8000_0008_EBX] =
            CPUID_8000_0008_EBX_WBNOINVD,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
            CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
            CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
            CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
            CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
        /*
         * Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        /*
         * Missing: Mode-based execute control (XS/XU), processor
         * tracing, TSC scaling
         */
        .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
            MSR_VMX_BASIC_TRUE_CTLS,
        .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
            VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
            VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
        .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
            MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
            MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
            MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
            MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
            MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
            MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
        .features[FEAT_VMX_EXIT_CTLS] =
            VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
            VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
            VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
            VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
            VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
        .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
            MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
        /* Client part: no VMX_PIN_BASED_POSTED_INTR here */
        .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
            VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
            VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
        .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
            VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
            VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
            VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
            VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
            VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
            VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
            VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
            VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
            VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
            VMX_CPU_BASED_MONITOR_TRAP_FLAG |
            VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
        .features[FEAT_VMX_SECONDARY_CTLS] =
            VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
            VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
            VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
            VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
            VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
            VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
            VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
        .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Icelake)",
        .versions = (X86CPUVersionDefinition[]) {
            { .version = 1 },
            {
                .version = 2,
                .alias = "Icelake-Client-noTSX",
                .props = (PropValue[]) {
                    { "hle", "off" },
                    { "rtm", "off" },
                    { /* end of list */ }
                },
            },
            { /* end of list */ }
        }
    },
3360 {
3361 .name = "Icelake-Server",
3362 .level = 0xd,
3363 .vendor = CPUID_VENDOR_INTEL,
3364 .family = 6,
3365 .model = 134,
3366 .stepping = 0,
3367 .features[FEAT_1_EDX] =
3368 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3369 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3370 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3371 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3372 CPUID_DE | CPUID_FP87,
3373 .features[FEAT_1_ECX] =
3374 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3375 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3376 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3377 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3378 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3379 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3380 .features[FEAT_8000_0001_EDX] =
3381 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3382 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3383 .features[FEAT_8000_0001_ECX] =
3384 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3385 .features[FEAT_8000_0008_EBX] =
3386 CPUID_8000_0008_EBX_WBNOINVD,
3387 .features[FEAT_7_0_EBX] =
3388 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3389 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3390 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3391 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3392 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3393 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3394 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3395 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3396 .features[FEAT_7_0_ECX] =
3397 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
3398 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
3399 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
3400 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
3401 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
3402 .features[FEAT_7_0_EDX] =
3403 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3404 /* Missing: XSAVES (not supported by some Linux versions,
3405 * including v4.1 to v4.12).
3406 * KVM doesn't yet expose any XSAVES state save component,
3407 * and the only one defined in Skylake (processor tracing)
3408 * probably will block migration anyway.
3409 */
3410 .features[FEAT_XSAVE] =
3411 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3412 CPUID_XSAVE_XGETBV1,
3413 .features[FEAT_6_EAX] =
3414 CPUID_6_EAX_ARAT,
3415 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3416 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3417 MSR_VMX_BASIC_TRUE_CTLS,
3418 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3419 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3420 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3421 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3422 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3423 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3424 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3425 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3426 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3427 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3428 .features[FEAT_VMX_EXIT_CTLS] =
3429 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3430 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3431 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3432 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3433 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3434 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3435 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3436 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3437 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3438 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3439 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3440 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3441 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3442 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3443 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3444 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3445 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3446 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3447 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3448 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3449 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3450 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3451 .features[FEAT_VMX_SECONDARY_CTLS] =
3452 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3453 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3454 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3455 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3456 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3457 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3458 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3459 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3460 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
3461 .xlevel = 0x80000008,
3462 .model_id = "Intel Xeon Processor (Icelake)",
3463 .versions = (X86CPUVersionDefinition[]) {
3464 { .version = 1 },
3465 {
3466 .version = 2,
3467 .alias = "Icelake-Server-noTSX",
3468 .props = (PropValue[]) {
3469 { "hle", "off" },
3470 { "rtm", "off" },
3471 { /* end of list */ }
3472 },
3473 },
3474 {
3475 .version = 3,
3476 .props = (PropValue[]) {
3477 { "arch-capabilities", "on" },
3478 { "rdctl-no", "on" },
3479 { "ibrs-all", "on" },
3480 { "skip-l1dfl-vmentry", "on" },
3481 { "mds-no", "on" },
3482 { "pschange-mc-no", "on" },
3483 { "taa-no", "on" },
3484 { /* end of list */ }
3485 },
3486 },
3487 { /* end of list */ }
3488 }
3489 },
3490 {
3491 .name = "Denverton",
3492 .level = 21,
3493 .vendor = CPUID_VENDOR_INTEL,
3494 .family = 6,
3495 .model = 95,
3496 .stepping = 1,
3497 .features[FEAT_1_EDX] =
3498 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
3499 CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
3500 CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
3501 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR |
3502 CPUID_SSE | CPUID_SSE2,
3503 .features[FEAT_1_ECX] =
3504 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
3505 CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 |
3506 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
3507 CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER |
3508 CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND,
3509 .features[FEAT_8000_0001_EDX] =
3510 CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB |
3511 CPUID_EXT2_RDTSCP | CPUID_EXT2_LM,
3512 .features[FEAT_8000_0001_ECX] =
3513 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3514 .features[FEAT_7_0_EBX] =
3515 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS |
3516 CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP |
3517 CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI,
3518 .features[FEAT_7_0_EDX] =
3519 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES |
3520 CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3521 /*
3522 * Missing: XSAVES (not supported by some Linux versions,
3523 * including v4.1 to v4.12).
3524 * KVM doesn't yet expose any XSAVES state save component,
3525 * and the only one defined in Skylake (processor tracing)
3526 * probably will block migration anyway.
3527 */
3528 .features[FEAT_XSAVE] =
3529 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1,
3530 .features[FEAT_6_EAX] =
3531 CPUID_6_EAX_ARAT,
3532 .features[FEAT_ARCH_CAPABILITIES] =
3533 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY,
3534 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3535 MSR_VMX_BASIC_TRUE_CTLS,
3536 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3537 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3538 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3539 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3540 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3541 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3542 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3543 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3544 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3545 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3546 .features[FEAT_VMX_EXIT_CTLS] =
3547 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3548 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3549 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3550 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3551 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3552 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3553 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3554 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3555 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3556 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3557 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3558 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3559 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3560 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3561 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3562 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3563 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3564 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3565 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3566 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3567 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3568 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3569 .features[FEAT_VMX_SECONDARY_CTLS] =
3570 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3571 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3572 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3573 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3574 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3575 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3576 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3577 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3578 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3579 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3580 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3581 .xlevel = 0x80000008,
3582 .model_id = "Intel Atom Processor (Denverton)",
3583 .versions = (X86CPUVersionDefinition[]) {
3584 { .version = 1 },
3585 {
3586 .version = 2,
3587 .props = (PropValue[]) {
3588 { "monitor", "off" },
3589 { "mpx", "off" },
3590 { /* end of list */ },
3591 },
3592 },
3593 { /* end of list */ },
3594 },
3595 },
3596 {
3597 .name = "Snowridge",
3598 .level = 27,
3599 .vendor = CPUID_VENDOR_INTEL,
3600 .family = 6,
3601 .model = 134,
3602 .stepping = 1,
3603 .features[FEAT_1_EDX] =
3604 /* missing: CPUID_PN CPUID_IA64 */
3605 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
3606 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE |
3607 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE |
3608 CPUID_CX8 | CPUID_APIC | CPUID_SEP |
3609 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
3610 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH |
3611 CPUID_MMX |
3612 CPUID_FXSR | CPUID_SSE | CPUID_SSE2,
3613 .features[FEAT_1_ECX] =
3614 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
3615 CPUID_EXT_SSSE3 |
3616 CPUID_EXT_CX16 |
3617 CPUID_EXT_SSE41 |
3618 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
3619 CPUID_EXT_POPCNT |
3620 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE |
3621 CPUID_EXT_RDRAND,
3622 .features[FEAT_8000_0001_EDX] =
3623 CPUID_EXT2_SYSCALL |
3624 CPUID_EXT2_NX |
3625 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3626 CPUID_EXT2_LM,
3627 .features[FEAT_8000_0001_ECX] =
3628 CPUID_EXT3_LAHF_LM |
3629 CPUID_EXT3_3DNOWPREFETCH,
3630 .features[FEAT_7_0_EBX] =
3631 CPUID_7_0_EBX_FSGSBASE |
3632 CPUID_7_0_EBX_SMEP |
3633 CPUID_7_0_EBX_ERMS |
3634 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */
3635 CPUID_7_0_EBX_RDSEED |
3636 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3637 CPUID_7_0_EBX_CLWB |
3638 CPUID_7_0_EBX_SHA_NI,
3639 .features[FEAT_7_0_ECX] =
3640 CPUID_7_0_ECX_UMIP |
3641 /* missing bit 5 */
3642 CPUID_7_0_ECX_GFNI |
3643 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE |
3644 CPUID_7_0_ECX_MOVDIR64B,
3645 .features[FEAT_7_0_EDX] =
3646 CPUID_7_0_EDX_SPEC_CTRL |
3647 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD |
3648 CPUID_7_0_EDX_CORE_CAPABILITY,
3649 .features[FEAT_CORE_CAPABILITY] =
3650 MSR_CORE_CAP_SPLIT_LOCK_DETECT,
3651 /*
3652 * Missing: XSAVES (not supported by some Linux versions,
3653 * including v4.1 to v4.12).
3654 * KVM doesn't yet expose any XSAVES state save component,
3655 * and the only one defined in Skylake (processor tracing)
3656 * probably will block migration anyway.
3657 */
3658 .features[FEAT_XSAVE] =
3659 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3660 CPUID_XSAVE_XGETBV1,
3661 .features[FEAT_6_EAX] =
3662 CPUID_6_EAX_ARAT,
3663 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3664 MSR_VMX_BASIC_TRUE_CTLS,
3665 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3666 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3667 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3668 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3669 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3670 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3671 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3672 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3673 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3674 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3675 .features[FEAT_VMX_EXIT_CTLS] =
3676 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3677 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3678 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3679 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3680 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3681 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3682 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3683 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3684 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3685 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3686 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3687 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3688 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3689 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3690 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3691 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3692 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3693 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3694 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3695 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3696 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3697 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3698 .features[FEAT_VMX_SECONDARY_CTLS] =
3699 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3700 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3701 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3702 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3703 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3704 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3705 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3706 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3707 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3708 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3709 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3710 .xlevel = 0x80000008,
3711 .model_id = "Intel Atom Processor (SnowRidge)",
3712 .versions = (X86CPUVersionDefinition[]) {
3713 { .version = 1 },
3714 {
3715 .version = 2,
3716 .props = (PropValue[]) {
3717 { "mpx", "off" },
3718 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" },
3719 { /* end of list */ },
3720 },
3721 },
3722 { /* end of list */ },
3723 },
3724 },
3725 {
3726 .name = "KnightsMill",
3727 .level = 0xd,
3728 .vendor = CPUID_VENDOR_INTEL,
3729 .family = 6,
3730 .model = 133,
3731 .stepping = 0,
3732 .features[FEAT_1_EDX] =
3733 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
3734 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
3735 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
3736 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
3737 CPUID_PSE | CPUID_DE | CPUID_FP87,
3738 .features[FEAT_1_ECX] =
3739 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3740 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3741 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3742 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3743 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3744 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3745 .features[FEAT_8000_0001_EDX] =
3746 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3747 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3748 .features[FEAT_8000_0001_ECX] =
3749 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3750 .features[FEAT_7_0_EBX] =
3751 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3752 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
3753 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
3754 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
3755 CPUID_7_0_EBX_AVX512ER,
3756 .features[FEAT_7_0_ECX] =
3757 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
3758 .features[FEAT_7_0_EDX] =
3759 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
3760 .features[FEAT_XSAVE] =
3761 CPUID_XSAVE_XSAVEOPT,
3762 .features[FEAT_6_EAX] =
3763 CPUID_6_EAX_ARAT,
3764 .xlevel = 0x80000008,
3765 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
3766 },
3767 {
3768 .name = "Opteron_G1",
3769 .level = 5,
3770 .vendor = CPUID_VENDOR_AMD,
3771 .family = 15,
3772 .model = 6,
3773 .stepping = 1,
3774 .features[FEAT_1_EDX] =
3775 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3776 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3777 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3778 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3779 CPUID_DE | CPUID_FP87,
3780 .features[FEAT_1_ECX] =
3781 CPUID_EXT_SSE3,
3782 .features[FEAT_8000_0001_EDX] =
3783 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3784 .xlevel = 0x80000008,
3785 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
3786 },
3787 {
3788 .name = "Opteron_G2",
3789 .level = 5,
3790 .vendor = CPUID_VENDOR_AMD,
3791 .family = 15,
3792 .model = 6,
3793 .stepping = 1,
3794 .features[FEAT_1_EDX] =
3795 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3796 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3797 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3798 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3799 CPUID_DE | CPUID_FP87,
3800 .features[FEAT_1_ECX] =
3801 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
3802 .features[FEAT_8000_0001_EDX] =
3803 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3804 .features[FEAT_8000_0001_ECX] =
3805 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
3806 .xlevel = 0x80000008,
3807 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
3808 },
3809 {
3810 .name = "Opteron_G3",
3811 .level = 5,
3812 .vendor = CPUID_VENDOR_AMD,
3813 .family = 16,
3814 .model = 2,
3815 .stepping = 3,
3816 .features[FEAT_1_EDX] =
3817 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3818 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3819 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3820 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3821 CPUID_DE | CPUID_FP87,
3822 .features[FEAT_1_ECX] =
3823 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
3824 CPUID_EXT_SSE3,
3825 .features[FEAT_8000_0001_EDX] =
3826 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
3827 CPUID_EXT2_RDTSCP,
3828 .features[FEAT_8000_0001_ECX] =
3829 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
3830 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
3831 .xlevel = 0x80000008,
3832 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
3833 },
3834 {
3835 .name = "Opteron_G4",
3836 .level = 0xd,
3837 .vendor = CPUID_VENDOR_AMD,
3838 .family = 21,
3839 .model = 1,
3840 .stepping = 2,
3841 .features[FEAT_1_EDX] =
3842 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3843 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3844 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3845 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3846 CPUID_DE | CPUID_FP87,
3847 .features[FEAT_1_ECX] =
3848 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3849 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3850 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
3851 CPUID_EXT_SSE3,
3852 .features[FEAT_8000_0001_EDX] =
3853 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
3854 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
3855 .features[FEAT_8000_0001_ECX] =
3856 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
3857 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
3858 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
3859 CPUID_EXT3_LAHF_LM,
3860 .features[FEAT_SVM] =
3861 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3862 /* no xsaveopt! */
3863 .xlevel = 0x8000001A,
3864 .model_id = "AMD Opteron 62xx class CPU",
3865 },
3866 {
3867 .name = "Opteron_G5",
3868 .level = 0xd,
3869 .vendor = CPUID_VENDOR_AMD,
3870 .family = 21,
3871 .model = 2,
3872 .stepping = 0,
3873 .features[FEAT_1_EDX] =
3874 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3875 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3876 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3877 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3878 CPUID_DE | CPUID_FP87,
3879 .features[FEAT_1_ECX] =
3880 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
3881 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
3882 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
3883 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
3884 .features[FEAT_8000_0001_EDX] =
3885 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
3886 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
3887 .features[FEAT_8000_0001_ECX] =
3888 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
3889 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
3890 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
3891 CPUID_EXT3_LAHF_LM,
3892 .features[FEAT_SVM] =
3893 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3894 /* no xsaveopt! */
3895 .xlevel = 0x8000001A,
3896 .model_id = "AMD Opteron 63xx class CPU",
3897 },
3898 {
3899 .name = "EPYC",
3900 .level = 0xd,
3901 .vendor = CPUID_VENDOR_AMD,
3902 .family = 23,
3903 .model = 1,
3904 .stepping = 2,
3905 .features[FEAT_1_EDX] =
3906 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3907 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
3908 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
3909 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
3910 CPUID_VME | CPUID_FP87,
3911 .features[FEAT_1_ECX] =
3912 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
3913 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
3914 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3915 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3916 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
3917 .features[FEAT_8000_0001_EDX] =
3918 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3919 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3920 CPUID_EXT2_SYSCALL,
3921 .features[FEAT_8000_0001_ECX] =
3922 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3923 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3924 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3925 CPUID_EXT3_TOPOEXT,
3926 .features[FEAT_7_0_EBX] =
3927 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3928 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3929 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3930 CPUID_7_0_EBX_SHA_NI,
3931 .features[FEAT_XSAVE] =
3932 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3933 CPUID_XSAVE_XGETBV1,
3934 .features[FEAT_6_EAX] =
3935 CPUID_6_EAX_ARAT,
3936 .features[FEAT_SVM] =
3937 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3938 .xlevel = 0x8000001E,
3939 .model_id = "AMD EPYC Processor",
3940 .cache_info = &epyc_cache_info,
3941 .use_epyc_apic_id_encoding = 1,
3942 .versions = (X86CPUVersionDefinition[]) {
3943 { .version = 1 },
3944 {
3945 .version = 2,
3946 .alias = "EPYC-IBPB",
3947 .props = (PropValue[]) {
3948 { "ibpb", "on" },
3949 { "model-id",
3950 "AMD EPYC Processor (with IBPB)" },
3951 { /* end of list */ }
3952 }
3953 },
3954 {
3955 .version = 3,
3956 .props = (PropValue[]) {
3957 { "ibpb", "on" },
3958 { "perfctr-core", "on" },
3959 { "clzero", "on" },
3960 { "xsaveerptr", "on" },
3961 { "xsaves", "on" },
3962 { "model-id",
3963 "AMD EPYC Processor" },
3964 { /* end of list */ }
3965 }
3966 },
3967 { /* end of list */ }
3968 }
3969 },
3970 {
3971 .name = "Dhyana",
3972 .level = 0xd,
3973 .vendor = CPUID_VENDOR_HYGON,
3974 .family = 24,
3975 .model = 0,
3976 .stepping = 1,
3977 .features[FEAT_1_EDX] =
3978 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3979 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
3980 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
3981 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
3982 CPUID_VME | CPUID_FP87,
3983 .features[FEAT_1_ECX] =
3984 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
3985 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
3986 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3987 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3988 CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
3989 .features[FEAT_8000_0001_EDX] =
3990 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3991 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3992 CPUID_EXT2_SYSCALL,
3993 .features[FEAT_8000_0001_ECX] =
3994 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3995 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3996 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3997 CPUID_EXT3_TOPOEXT,
3998 .features[FEAT_8000_0008_EBX] =
3999 CPUID_8000_0008_EBX_IBPB,
4000 .features[FEAT_7_0_EBX] =
4001 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
4002 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
4003 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
4004 /*
4005 * Missing: XSAVES (not supported by some Linux versions,
4006 * including v4.1 to v4.12).
4007 * KVM doesn't yet expose any XSAVES state save component.
4008 */
4009 .features[FEAT_XSAVE] =
4010 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
4011 CPUID_XSAVE_XGETBV1,
4012 .features[FEAT_6_EAX] =
4013 CPUID_6_EAX_ARAT,
4014 .features[FEAT_SVM] =
4015 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
4016 .xlevel = 0x8000001E,
4017 .model_id = "Hygon Dhyana Processor",
4018 .cache_info = &epyc_cache_info,
4019 },
4020 {
4021 .name = "EPYC-Rome",
4022 .level = 0xd,
4023 .vendor = CPUID_VENDOR_AMD,
4024 .family = 23,
4025 .model = 49,
4026 .stepping = 0,
4027 .features[FEAT_1_EDX] =
4028 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
4029 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
4030 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
4031 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
4032 CPUID_VME | CPUID_FP87,
4033 .features[FEAT_1_ECX] =
4034 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
4035 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
4036 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
4037 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
4038 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
4039 .features[FEAT_8000_0001_EDX] =
4040 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
4041 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
4042 CPUID_EXT2_SYSCALL,
4043 .features[FEAT_8000_0001_ECX] =
4044 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
4045 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
4046 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
4047 CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE,
4048 .features[FEAT_8000_0008_EBX] =
4049 CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR |
4050 CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB |
4051 CPUID_8000_0008_EBX_STIBP,
4052 .features[FEAT_7_0_EBX] =
4053 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
4054 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
4055 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
4056 CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB,
4057 .features[FEAT_7_0_ECX] =
4058 CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID,
4059 .features[FEAT_XSAVE] =
4060 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
4061 CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES,
4062 .features[FEAT_6_EAX] =
4063 CPUID_6_EAX_ARAT,
4064 .features[FEAT_SVM] =
4065 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
4066 .xlevel = 0x8000001E,
4067 .model_id = "AMD EPYC-Rome Processor",
4068 .cache_info = &epyc_rome_cache_info,
4069 .use_epyc_apic_id_encoding = 1,
4070 },
4071 };
4072
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * The table is NULL-terminated; x86_cpu_change_kvm_default() scans it
 * until the { NULL, NULL } sentinel, so the sentinel must stay last.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
4089
/* TCG-specific defaults that override all CPU models when using TCG.
 *
 * NULL-terminated table in the same format as kvm_default_props.
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};
4096
4097
/*
 * We resolve CPU model aliases using -v1 when using "-machine
 * none", but this is just for compatibility while libvirt isn't
 * adapted to resolve CPU model versions before creating VMs.
 * See "Runnability guarantee of CPU models" in qemu-deprecated.texi.
 */
4104 X86CPUVersion default_cpu_version = 1;
4105
/* Set the version used to resolve CPU_VERSION_AUTO for all CPU models. */
void x86_cpu_set_default_version(X86CPUVersion version)
{
    /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */
    assert(version != CPU_VERSION_AUTO);
    default_cpu_version = version;
}
4112
4113 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model)
4114 {
4115 int v = 0;
4116 const X86CPUVersionDefinition *vdef =
4117 x86_cpu_def_get_versions(model->cpudef);
4118 while (vdef->version) {
4119 v = vdef->version;
4120 vdef++;
4121 }
4122 return v;
4123 }
4124
4125 /* Return the actual version being used for a specific CPU model */
static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model)
{
    X86CPUVersion v = model->version;
    /* AUTO resolves to the machine-type-configurable default first */
    if (v == CPU_VERSION_AUTO) {
        v = default_cpu_version;
    }
    /* LATEST (possibly via the default) resolves to the newest definition */
    if (v == CPU_VERSION_LATEST) {
        return x86_cpu_model_last_version(model);
    }
    return v;
}
4137
4138 void x86_cpu_change_kvm_default(const char *prop, const char *value)
4139 {
4140 PropValue *pv;
4141 for (pv = kvm_default_props; pv->prop; pv++) {
4142 if (!strcmp(pv->prop, prop)) {
4143 pv->value = value;
4144 break;
4145 }
4146 }
4147
4148 /* It is valid to call this function only for properties that
4149 * are already present in the kvm_default_props table.
4150 */
4151 assert(pv->prop);
4152 }
4153
4154 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
4155 bool migratable_only);
4156
/*
 * Return true if KVM reports LMCE (Local Machine Check Exception)
 * capability.  Without CONFIG_KVM, mce_cap stays 0 and this is false.
 */
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}
4169
4170 #define CPUID_MODEL_ID_SZ 48
4171
4172 /**
4173 * cpu_x86_fill_model_id:
4174 * Get CPUID model ID string from host CPU.
4175 *
4176 * @str should have at least CPUID_MODEL_ID_SZ bytes
4177 *
4178 * The function does NOT add a null terminator to the string
4179 * automatically.
4180 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int i;

    /* Model ID string comes from CPUID leaves 0x80000002..0x80000004,
     * 16 bytes per leaf (EAX, EBX, ECX, EDX in that order).
     */
    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(str + i * 16, regs, sizeof(regs));
    }
    return 0;
}
4195
/* Properties specific to the "max" CPU model */
static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
4201
/* Class init for the "max" CPU model: description, list ordering, props. */
static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* Sort after named models in -cpu help output */
    xcc->ordering = 9;

    xcc->model_description =
        "Enables all features supported by the accelerator in the current host";

    device_class_set_props(dc, max_x86_cpu_properties);
}
4214
/*
 * Instance init for the "max" CPU model.  When the accelerator exposes
 * host CPUID (KVM/HVF), mirror the host's vendor/family/model/stepping
 * and model-id; otherwise (TCG) use fixed QEMU identification values.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;

        host_vendor_fms(vendor, &family, &model, &stepping);
        /* model_id stays NUL-terminated: the buffer was zero-initialized */
        cpu_x86_fill_model_id(model_id);

        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        if (kvm_enabled()) {
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            /* accel_uses_host_cpuid() without KVM implies HVF here */
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
4274
/* QOM type registration for the "max" CPU model */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
4281
4282 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
/* Class init for the "host" CPU model (requires a host-CPUID accel). */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* "host" is rejected when the accelerator cannot read host CPUID */
    xcc->host_cpuid_required = true;
    xcc->ordering = 8;

#if defined(CONFIG_KVM)
    xcc->model_description =
        "KVM processor with all supported host features ";
#elif defined(CONFIG_HVF)
    xcc->model_description =
        "HVF processor with all supported host features ";
#endif
}
4298
/* "host" is a child type of "max"; it inherits max_x86_cpu_initfn */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
4304
4305 #endif
4306
4307 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
4308 {
4309 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
4310
4311 switch (f->type) {
4312 case CPUID_FEATURE_WORD:
4313 {
4314 const char *reg = get_register_name_32(f->cpuid.reg);
4315 assert(reg);
4316 return g_strdup_printf("CPUID.%02XH:%s",
4317 f->cpuid.eax, reg);
4318 }
4319 case MSR_FEATURE_WORD:
4320 return g_strdup_printf("MSR(%02XH)",
4321 f->msr.index);
4322 }
4323
4324 return NULL;
4325 }
4326
4327 static bool x86_cpu_have_filtered_features(X86CPU *cpu)
4328 {
4329 FeatureWord w;
4330
4331 for (w = 0; w < FEATURE_WORDS; w++) {
4332 if (cpu->filtered_features[w]) {
4333 return true;
4334 }
4335 }
4336
4337 return false;
4338 }
4339
/*
 * Record the bits in @mask of feature word @w as unavailable: clear them
 * from the CPU's features (unless force_features is set) and remember
 * them in filtered_features.  If @verbose_prefix is non-NULL, warn once
 * per filtered bit.
 */
static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
                                      const char *verbose_prefix)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *f = &feature_word_info[w];
    int i;

    /* force_features keeps the bits set but still records them as filtered */
    if (!cpu->force_features) {
        env->features[w] &= ~mask;
    }
    cpu->filtered_features[w] |= mask;

    if (!verbose_prefix) {
        return;
    }

    for (i = 0; i < 64; ++i) {
        if ((1ULL << i) & mask) {
            g_autofree char *feat_word_str = feature_word_description(f, i);
            warn_report("%s: %s%s%s [bit %d]",
                        verbose_prefix,
                        feat_word_str,
                        f->feat_names[i] ? "." : "",
                        f->feat_names[i] ? f->feat_names[i] : "", i);
        }
    }
}
4367
4368 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
4369 const char *name, void *opaque,
4370 Error **errp)
4371 {
4372 X86CPU *cpu = X86_CPU(obj);
4373 CPUX86State *env = &cpu->env;
4374 int64_t value;
4375
4376 value = (env->cpuid_version >> 8) & 0xf;
4377 if (value == 0xf) {
4378 value += (env->cpuid_version >> 20) & 0xff;
4379 }
4380 visit_type_int(v, name, &value, errp);
4381 }
4382
/*
 * QOM setter for "family": encodes values > 0xf into the extended-family
 * field (bits 27..20) with the base family held at 0xf.
 */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear extended family (27..20) and base family (11..8) fields */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
4412
4413 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
4414 const char *name, void *opaque,
4415 Error **errp)
4416 {
4417 X86CPU *cpu = X86_CPU(obj);
4418 CPUX86State *env = &cpu->env;
4419 int64_t value;
4420
4421 value = (env->cpuid_version >> 4) & 0xf;
4422 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
4423 visit_type_int(v, name, &value, errp);
4424 }
4425
/*
 * QOM setter for "model": low nibble goes to bits 7..4, high nibble to
 * the extended-model field (bits 19..16).
 */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear extended model (19..16) and base model (7..4) fields */
    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
4451
4452 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
4453 const char *name, void *opaque,
4454 Error **errp)
4455 {
4456 X86CPU *cpu = X86_CPU(obj);
4457 CPUX86State *env = &cpu->env;
4458 int64_t value;
4459
4460 value = env->cpuid_version & 0xf;
4461 visit_type_int(v, name, &value, errp);
4462 }
4463
/* QOM setter for "stepping": accepts 0..0xf, stored in the low nibble. */
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
4489
4490 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
4491 {
4492 X86CPU *cpu = X86_CPU(obj);
4493 CPUX86State *env = &cpu->env;
4494 char *value;
4495
4496 value = g_malloc(CPUID_VENDOR_SZ + 1);
4497 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
4498 env->cpuid_vendor3);
4499 return value;
4500 }
4501
/*
 * QOM setter for "vendor": requires exactly CPUID_VENDOR_SZ (12)
 * characters, packed 4 bytes per vendor register in CPUID byte order.
 */
static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        env->cpuid_vendor1 |= ((uint8_t)value[i    ]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}
4523
4524 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
4525 {
4526 X86CPU *cpu = X86_CPU(obj);
4527 CPUX86State *env = &cpu->env;
4528 char *value;
4529 int i;
4530
4531 value = g_malloc(48 + 1);
4532 for (i = 0; i < 48; i++) {
4533 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
4534 }
4535 value[48] = '\0';
4536 return value;
4537 }
4538
4539 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
4540 Error **errp)
4541 {
4542 X86CPU *cpu = X86_CPU(obj);
4543 CPUX86State *env = &cpu->env;
4544 int c, len, i;
4545
4546 if (model_id == NULL) {
4547 model_id = "";
4548 }
4549 len = strlen(model_id);
4550 memset(env->cpuid_model, 0, 48);
4551 for (i = 0; i < 48; i++) {
4552 if (i >= len) {
4553 c = '\0';
4554 } else {
4555 c = (uint8_t)model_id[i];
4556 }
4557 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
4558 }
4559 }
4560
4561 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
4562 void *opaque, Error **errp)
4563 {
4564 X86CPU *cpu = X86_CPU(obj);
4565 int64_t value;
4566
4567 value = cpu->env.tsc_khz * 1000;
4568 visit_type_int(v, name, &value, errp);
4569 }
4570
/* QOM setter for "tsc-frequency": accepts Hz, stores kHz in tsc_khz and
 * records the user-requested value in user_tsc_khz.
 */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
4593
4594 /* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* @opaque points at the feature word array to report (set at
     * property-add time), so the same getter serves both properties.
     */
    uint64_t *array = (uint64_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /*
         * We didn't have MSR features when "feature-words" was
         * introduced. Therefore skipped other type entries.
         */
        if (wi->type != CPUID_FEATURE_WORD) {
            continue;
        }
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid.eax;
        qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid.ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    /* list nodes are stack-allocated; the visitor copies what it needs */
    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
4629
4630 /* Convert all '_' in a feature string option name to '-', to make feature
4631 * name conform to QOM property naming rule, which uses '-' instead of '_'.
4632 */
/* Replace every '_' in @s with '-' in place (feature name -> QOM name). */
static inline void feat2prop(char *s)
{
    for (char *p = strchr(s, '_'); p != NULL; p = strchr(p, '_')) {
        *p = '-';
    }
}
4639
4640 /* Return the feature property name for a feature flag bit */
/*
 * Return the feature property name for bit @bitnr of feature word @w,
 * or NULL if the bit has no name.  XSAVE component bits are translated
 * back to the feature that enables them.
 */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    const char *name;
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 64);
    assert(w < FEATURE_WORDS);
    name = feature_word_info[w].feat_names[bitnr];
    /* CPUID feature words are 32-bit; named bits >= 32 would be a bug */
    assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD));
    return name;
}
4663
/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
/* Accumulated legacy "+feat"/"-feat" overrides collected during parsing */
static GList *plus_features, *minus_features;
4670
/* GCompareFunc adapter: NULL-safe string compare for g_list_find_custom() */
static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}
4675
4676 /* Parse "+feature,-feature,feature=foo" CPU feature string
4677 */
/*
 * Parse a "+feature,-feature,feature=value" string and register each
 * setting as a global property on @typename.  Only the first call has
 * any effect; "+feat"/"-feat" go to the legacy override lists instead
 * of being registered directly.  Note: mutates @features via strtok().
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single "key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    /* Global properties must only be registered once per process */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "key=value"; a bare "key" means "key=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" takes a size suffix (e.g. 2G) and maps
         * to the "tsc-frequency" property in plain Hz.
         */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}
4765
4766 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
4767 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose);
4768
4769 /* Build a list with the name of all features on a feature word array */
4770 static void x86_cpu_list_feature_names(FeatureWordArray features,
4771 strList **feat_names)
4772 {
4773 FeatureWord w;
4774 strList **next = feat_names;
4775
4776 for (w = 0; w < FEATURE_WORDS; w++) {
4777 uint64_t filtered = features[w];
4778 int i;
4779 for (i = 0; i < 64; i++) {
4780 if (filtered & (1ULL << i)) {
4781 strList *new = g_new0(strList, 1);
4782 new->value = g_strdup(x86_cpu_feature_name(w, i));
4783 *next = new;
4784 next = &new->next;
4785 }
4786 }
4787 }
4788 }
4789
/* QOM getter for "unavailable-features": the filtered-feature names. */
static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
                                             const char *name, void *opaque,
                                             Error **errp)
{
    X86CPU *xc = X86_CPU(obj);
    strList *result = NULL;

    x86_cpu_list_feature_names(xc->filtered_features, &result);
    visit_type_strList(v, "unavailable-features", &result, errp);
}
4800
4801 /* Check for missing features that may prevent the CPU class from
4802 * running using the current machine and accelerator.
4803 */
static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
                                                 strList **missing_feats)
{
    X86CPU *xc;
    Error *err = NULL;
    strList **next = missing_feats;

    /* "host"-like models are unusable entirely without host CPUID */
    if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("kvm");
        *missing_feats = new;
        return;
    }

    /* Instantiate a throwaway CPU to compute its filtered features */
    xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));

    x86_cpu_expand_features(xc, &err);
    if (err) {
        /* Errors at x86_cpu_expand_features should never happen,
         * but in case it does, just report the model as not
         * runnable at all using the "type" property.
         */
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("type");
        *next = new;
        next = &new->next;
    }

    x86_cpu_filter_features(xc, false);

    x86_cpu_list_feature_names(xc->filtered_features, next);

    object_unref(OBJECT(xc));
}
4838
4839 /* Print all cpuid feature names in featureset
4840 */
4841 static void listflags(GList *features)
4842 {
4843 size_t len = 0;
4844 GList *tmp;
4845
4846 for (tmp = features; tmp; tmp = tmp->next) {
4847 const char *name = tmp->data;
4848 if ((len + strlen(name) + 1) >= 75) {
4849 qemu_printf("\n");
4850 len = 0;
4851 }
4852 qemu_printf("%s%s", len == 0 ? " " : " ", name);
4853 len += strlen(name) + 1;
4854 }
4855 qemu_printf("\n");
4856 }
4857
4858 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
4859 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
4860 {
4861 ObjectClass *class_a = (ObjectClass *)a;
4862 ObjectClass *class_b = (ObjectClass *)b;
4863 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
4864 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
4865 int ret;
4866
4867 if (cc_a->ordering != cc_b->ordering) {
4868 ret = cc_a->ordering - cc_b->ordering;
4869 } else {
4870 g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a);
4871 g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b);
4872 ret = strcmp(name_a, name_b);
4873 }
4874 return ret;
4875 }
4876
4877 static GSList *get_sorted_cpu_model_list(void)
4878 {
4879 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
4880 list = g_slist_sort(list, x86_cpu_list_compare);
4881 return list;
4882 }
4883
4884 static char *x86_cpu_class_get_model_id(X86CPUClass *xc)
4885 {
4886 Object *obj = object_new_with_class(OBJECT_CLASS(xc));
4887 char *r = object_property_get_str(obj, "model-id", &error_abort);
4888 object_unref(obj);
4889 return r;
4890 }
4891
4892 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc)
4893 {
4894 X86CPUVersion version;
4895
4896 if (!cc->model || !cc->model->is_alias) {
4897 return NULL;
4898 }
4899 version = x86_cpu_model_resolve_version(cc->model);
4900 if (version <= 0) {
4901 return NULL;
4902 }
4903 return x86_cpu_versioned_model_name(cc->model->cpudef, version);
4904 }
4905
/* g_slist_foreach callback: print one CPU model line for -cpu help. */
static void x86_cpu_list_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    X86CPUClass *cc = X86_CPU_CLASS(oc);
    g_autofree char *name = x86_cpu_class_get_model_name(cc);
    g_autofree char *desc = g_strdup(cc->model_description);
    g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc);
    g_autofree char *model_id = x86_cpu_class_get_model_id(cc);

    /* Description fallback order: explicit description, alias note,
     * model-id plus note, bare model-id.
     */
    if (!desc && alias_of) {
        if (cc->model && cc->model->version == CPU_VERSION_AUTO) {
            desc = g_strdup("(alias configured by machine type)");
        } else {
            desc = g_strdup_printf("(alias of %s)", alias_of);
        }
    }
    if (!desc && cc->model && cc->model->note) {
        desc = g_strdup_printf("%s [%s]", model_id, cc->model->note);
    }
    if (!desc) {
        desc = g_strdup_printf("%s", model_id);
    }

    qemu_printf("x86 %-20s  %-58s\n", name, desc);
}
4931
4932 /* list available CPU models and flags */
4933 void x86_cpu_list(void)
4934 {
4935 int i, j;
4936 GSList *list;
4937 GList *names = NULL;
4938
4939 qemu_printf("Available CPUs:\n");
4940 list = get_sorted_cpu_model_list();
4941 g_slist_foreach(list, x86_cpu_list_entry, NULL);
4942 g_slist_free(list);
4943
4944 names = NULL;
4945 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
4946 FeatureWordInfo *fw = &feature_word_info[i];
4947 for (j = 0; j < 64; j++) {
4948 if (fw->feat_names[j]) {
4949 names = g_list_append(names, (gpointer)fw->feat_names[j]);
4950 }
4951 }
4952 }
4953
4954 names = g_list_sort(names, (GCompareFunc)strcmp);
4955
4956 qemu_printf("\nRecognized CPUID flags:\n");
4957 listflags(names);
4958 qemu_printf("\n");
4959 g_list_free(names);
4960 }
4961
/* g_slist_foreach callback: prepend a CpuDefinitionInfo entry for one
 * CPU class to the list passed via @user_data.
 */
static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
{
    ObjectClass *oc = data;
    X86CPUClass *cc = X86_CPU_CLASS(oc);
    CpuDefinitionInfoList **cpu_list = user_data;
    CpuDefinitionInfoList *entry;
    CpuDefinitionInfo *info;

    info = g_malloc0(sizeof(*info));
    info->name = x86_cpu_class_get_model_name(cc);
    x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
    info->has_unavailable_features = true;
    info->q_typename = g_strdup(object_class_get_name(oc));
    info->migration_safe = cc->migration_safe;
    info->has_migration_safe = true;
    info->q_static = cc->static_model;
    /*
     * Old machine types won't report aliases, so that alias translation
     * doesn't break compatibility with previous QEMU versions.
     */
    if (default_cpu_version != CPU_VERSION_LEGACY) {
        info->alias_of = x86_cpu_class_get_alias_of(cc);
        info->has_alias_of = !!info->alias_of;
    }

    entry = g_malloc0(sizeof(*entry));
    entry->value = info;
    entry->next = *cpu_list;
    *cpu_list = entry;
}
4992
4993 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
4994 {
4995 CpuDefinitionInfoList *cpu_list = NULL;
4996 GSList *list = get_sorted_cpu_model_list();
4997 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
4998 g_slist_free(list);
4999 return cpu_list;
5000 }
5001
/*
 * Return the bits of feature word @w supported by the current
 * accelerator; with @migratable_only, restrict to migratable bits.
 * With no accelerator configured, everything is reported supported.
 */
static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint64_t r = 0;

    if (kvm_enabled()) {
        switch (wi->type) {
        case CPUID_FEATURE_WORD:
            r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
                                                        wi->cpuid.ecx,
                                                        wi->cpuid.reg);
            break;
        case MSR_FEATURE_WORD:
            r = kvm_arch_get_supported_msr_feature(kvm_state,
                        wi->msr.index);
            break;
        }
    } else if (hvf_enabled()) {
        /* HVF has no MSR feature words */
        if (wi->type != CPUID_FEATURE_WORD) {
            return 0;
        }
        r = hvf_get_supported_cpuid(wi->cpuid.eax,
                                    wi->cpuid.ecx,
                                    wi->cpuid.reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
5037
5038 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
5039 {
5040 PropValue *pv;
5041 for (pv = props; pv->prop; pv++) {
5042 if (!pv->value) {
5043 continue;
5044 }
5045 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
5046 &error_abort);
5047 }
5048 }
5049
5050 /* Apply properties for the CPU model version specified in model */
static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
{
    const X86CPUVersionDefinition *vdef;
    X86CPUVersion version = x86_cpu_model_resolve_version(model);

    /* Legacy mode: no versioned property changes at all */
    if (version == CPU_VERSION_LEGACY) {
        return;
    }

    /* Version properties are cumulative: apply every version's props up
     * to and including the resolved target version.
     */
    for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) {
        PropValue *p;

        for (p = vdef->props; p && p->prop; p++) {
            object_property_parse(OBJECT(cpu), p->value, p->prop,
                                  &error_abort);
        }

        if (vdef->version == version) {
            break;
        }
    }

    /*
     * If we reached the end of the list, version number was invalid
     */
    assert(vdef->version == version);
}
5078
5079 /* Load data from X86CPUDefinition into a X86CPU object
5080 */
/* Load data from X86CPUDefinition into a X86CPU object: identification,
 * feature words, cache info, accelerator defaults and versioned props.
 */
static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model, Error **errp)
{
    X86CPUDefinition *def = model->cpudef;
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        /* x2apic needs the in-kernel irqchip; disable its default if absent */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

    x86_cpu_apply_version_props(cpu, model);
}
5142
5143 #ifndef CONFIG_USER_ONLY
5144 /* Return a QDict containing keys for all properties that can be included
5145 * in static expansion of CPU models. All properties set by x86_cpu_load_model()
5146 * must be included in the dictionary.
5147 */
static QDict *x86_cpu_static_props(void)
{
    FeatureWord w;
    int i;
    /* Fixed identification/level properties set by x86_cpu_load_model() */
    static const char *props[] = {
        "min-level",
        "min-xlevel",
        "family",
        "model",
        "stepping",
        "model-id",
        "vendor",
        "lmce",
        NULL,
    };
    static QDict *d;

    /* Built once and cached for the lifetime of the process */
    if (d) {
        return d;
    }

    d = qdict_new();
    for (i = 0; props[i]; i++) {
        qdict_put_null(d, props[i]);
    }

    /* Plus every named feature flag from every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *fi = &feature_word_info[w];
        int bit;
        for (bit = 0; bit < 64; bit++) {
            if (!fi->feat_names[bit]) {
                continue;
            }
            qdict_put_null(d, fi->feat_names[bit]);
        }
    }

    return d;
}
5187
5188 /* Add an entry to @props dict, with the value for property. */
5189 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
5190 {
5191 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
5192 &error_abort);
5193
5194 qdict_put_obj(props, prop, value);
5195 }
5196
5197 /* Convert CPU model data from X86CPU object to a property dictionary
5198 * that can recreate exactly the same CPU model.
5199 */
5200 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
5201 {
5202 QDict *sprops = x86_cpu_static_props();
5203 const QDictEntry *e;
5204
5205 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
5206 const char *prop = qdict_entry_key(e);
5207 x86_cpu_expand_prop(cpu, props, prop);
5208 }
5209 }
5210
5211 /* Convert CPU model data from X86CPU object to a property dictionary
5212 * that can recreate exactly the same CPU model, including every
5213 * writeable QOM property.
5214 */
5215 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
5216 {
5217 ObjectPropertyIterator iter;
5218 ObjectProperty *prop;
5219
5220 object_property_iter_init(&iter, OBJECT(cpu));
5221 while ((prop = object_property_iter_next(&iter))) {
5222 /* skip read-only or write-only properties */
5223 if (!prop->get || !prop->set) {
5224 continue;
5225 }
5226
5227 /* "hotplugged" is the only property that is configurable
5228 * on the command-line but will be set differently on CPUs
5229 * created using "-cpu ... -smp ..." and by CPUs created
5230 * on the fly by x86_cpu_from_model() for querying. Skip it.
5231 */
5232 if (!strcmp(prop->name, "hotplugged")) {
5233 continue;
5234 }
5235 x86_cpu_expand_prop(cpu, props, prop->name);
5236 }
5237 }
5238
5239 static void object_apply_props(Object *obj, QDict *props, Error **errp)
5240 {
5241 const QDictEntry *prop;
5242 Error *err = NULL;
5243
5244 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
5245 object_property_set_qobject(obj, qdict_entry_value(prop),
5246 qdict_entry_key(prop), &err);
5247 if (err) {
5248 break;
5249 }
5250 }
5251
5252 error_propagate(errp, err);
5253 }
5254
/* Create X86CPU object according to model+props specification.
 *
 * Looks up the CPU class for @model, instantiates it, applies the
 * optional @props dictionary on top, and runs feature expansion so
 * the object reflects host/accelerator capabilities.
 *
 * Returns the new object (caller owns one reference), or NULL with
 * @errp set on failure.  Note the object is NOT realized; it exists
 * only for property introspection (query-cpu-model-expansion).
 */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
    if (props) {
        /* User-supplied props override the model defaults. */
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    if (err) {
        error_propagate(errp, err);
        /* xc may still be NULL here (model lookup failure). */
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}
5289
5290 CpuModelExpansionInfo *
5291 qmp_query_cpu_model_expansion(CpuModelExpansionType type,
5292 CpuModelInfo *model,
5293 Error **errp)
5294 {
5295 X86CPU *xc = NULL;
5296 Error *err = NULL;
5297 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
5298 QDict *props = NULL;
5299 const char *base_name;
5300
5301 xc = x86_cpu_from_model(model->name,
5302 model->has_props ?
5303 qobject_to(QDict, model->props) :
5304 NULL, &err);
5305 if (err) {
5306 goto out;
5307 }
5308
5309 props = qdict_new();
5310 ret->model = g_new0(CpuModelInfo, 1);
5311 ret->model->props = QOBJECT(props);
5312 ret->model->has_props = true;
5313
5314 switch (type) {
5315 case CPU_MODEL_EXPANSION_TYPE_STATIC:
5316 /* Static expansion will be based on "base" only */
5317 base_name = "base";
5318 x86_cpu_to_dict(xc, props);
5319 break;
5320 case CPU_MODEL_EXPANSION_TYPE_FULL:
5321 /* As we don't return every single property, full expansion needs
5322 * to keep the original model name+props, and add extra
5323 * properties on top of that.
5324 */
5325 base_name = model->name;
5326 x86_cpu_to_dict_full(xc, props);
5327 break;
5328 default:
5329 error_setg(&err, "Unsupported expansion type");
5330 goto out;
5331 }
5332
5333 x86_cpu_to_dict(xc, props);
5334
5335 ret->model->name = g_strdup(base_name);
5336
5337 out:
5338 object_unref(OBJECT(xc));
5339 if (err) {
5340 error_propagate(errp, err);
5341 qapi_free_CpuModelExpansionInfo(ret);
5342 ret = NULL;
5343 }
5344 return ret;
5345 }
5346 #endif /* !CONFIG_USER_ONLY */
5347
5348 static gchar *x86_gdb_arch_name(CPUState *cs)
5349 {
5350 #ifdef TARGET_X86_64
5351 return g_strdup("i386:x86-64");
5352 #else
5353 return g_strdup("i386");
5354 #endif
5355 }
5356
/* class_init for the per-model CPU subclasses registered by
 * x86_register_cpu_model_type(); @data is the X86CPUModel passed
 * via TypeInfo.class_data.  Models registered this way are
 * migration-safe by construction.
 */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUModel *model = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->model = model;
    xcc->migration_safe = true;
}
5365
/* Register a QOM type named after CPU model @name, whose class will
 * carry @model (see x86_cpu_cpudef_class_init).  @model must stay
 * alive for the lifetime of the type (callers allocate it and never
 * free it).
 */
static void x86_register_cpu_model_type(const char *name, X86CPUModel *model)
{
    g_autofree char *typename = x86_cpu_type_name(name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = model,
    };

    /* type_register() copies ti/typename, so the autofree is safe. */
    type_register(&ti);
}
5378
/* Register all QOM types for one CPU model definition: the unversioned
 * name (an alias resolved at runtime), every versioned variant, and any
 * per-version alias names.
 */
static void x86_register_cpudef_types(X86CPUDefinition *def)
{
    X86CPUModel *m;
    const X86CPUVersionDefinition *vdef;

    /* AMD aliases are handled at runtime based on CPUID vendor, so
     * they shouldn't be set on the CPU model table.
     */
    assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
    /* catch mistakes instead of silently truncating model_id when too long */
    assert(def->model_id && strlen(def->model_id) <= 48);

    /* Unversioned model: resolves to some version at runtime
     * (CPU_VERSION_AUTO), so it is registered as an alias.
     */
    m = g_new0(X86CPUModel, 1);
    m->cpudef = def;
    m->version = CPU_VERSION_AUTO;
    m->is_alias = true;
    x86_register_cpu_model_type(def->name, m);

    /* Versioned models: one type per entry in the versions table. */

    for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
        X86CPUModel *m = g_new0(X86CPUModel, 1);
        g_autofree char *name =
            x86_cpu_versioned_model_name(def, vdef->version);
        m->cpudef = def;
        m->version = vdef->version;
        m->note = vdef->note;
        x86_register_cpu_model_type(name, m);

        /* Optional extra name pointing at this same version. */
        if (vdef->alias) {
            X86CPUModel *am = g_new0(X86CPUModel, 1);
            am->cpudef = def;
            am->version = vdef->version;
            am->is_alias = true;
            x86_register_cpu_model_type(vdef->alias, am);
        }
    }

}
5419
5420 #if !defined(CONFIG_USER_ONLY)
5421
/* Drop the APIC feature bit from CPUID[1].EDX, so the guest sees no
 * local APIC advertised.
 */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
5426
5427 #endif /* !CONFIG_USER_ONLY */
5428
/*
 * Emulate the CPUID instruction: fill *eax/*ebx/*ecx/*edx with the
 * values for leaf @index, sub-leaf @count, from the configured feature
 * words, cache info and CPU topology in @env.
 *
 * Leaves above the per-range maximum (basic / 0x8000xxxx extended /
 * 0xC000xxxx Centaur / 0x4000xxxx hypervisor) are clamped per Intel's
 * documented out-of-range behavior.  Several cache leaves can instead
 * pass through the host values when cache_info_passthrough is set.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t die_offset;
    uint32_t limit;
    uint32_t signature[3];
    X86CPUTopoInfo topo_info;

    /* Snapshot the configured topology for the leaves that encode it. */
    topo_info.nodes_per_pkg = env->nr_nodes;
    topo_info.dies_per_pkg = env->nr_dies;
    topo_info.cores_per_die = cs->nr_cores;
    topo_info.threads_per_core = cs->nr_threads;

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string and maximum basic leaf. */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, brand/CLFLUSH/logical-count, feature flags. */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            /* OSXSAVE mirrors CR4.OSXSAVE, so it is computed here
             * rather than stored in the feature word.
             */
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) <<  8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                die_offset = apicid_die_offset(&topo_info);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << die_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* MONITOR/MWAIT Leaf */
        *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
        *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
        *ecx = cpu->mwait.ecx; /* flags */
        *edx = cpu->mwait.edx; /* mwait substates */
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            /* Maximum ECX value for sub-leaves */
            *eax = env->cpuid_level_func7;
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                /* OSPKE mirrors CR4.PKE, computed on the fly like OSXSAVE. */
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else if (count == 1) {
            *eax = env->features[FEAT_7_1_EAX];
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            /* No PMU exposed under TCG or when disabled. */
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(&topo_info);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = env->pkg_offset;
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        /* Shift widths above 0x1f would not fit the 5-bit EAX field. */
        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0x1F:
        /* V2 Extended Topology Enumeration Leaf */
        if (env->nr_dies < 2) {
            /* Leaf is only exposed on multi-die configurations. */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;
        switch (count) {
        case 0:
            *eax = apicid_core_offset(&topo_info);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_die_offset(&topo_info);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        case 2:
            *eax = env->pkg_offset;
            *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }
        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            /*
             * The initial value of xcr0 and ebx == 0, On host without kvm
             * commit 412a3c41(e.g., CentOS 6), the ebx's value always == 0
             * even through guest update xcr0, this will crash some legacy guest
             * (e.g., CentOS 6), So set ebx == ecx to workaroud it.
             */
            *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0);
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset for each enabled XSAVE area. */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Maximum extended leaf plus vendor string (again). */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf. */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) |
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) |
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) |
               (L2_DTLB_2M_ENTRIES << 16) |
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) |
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) |
               (L2_DTLB_4K_ENTRIES << 16) |
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) |
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        /* Advanced Power Management feature flags. */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        /* AMD cache topology (analogue of leaf 4). */
        *eax = 0;
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            break;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache,
                                       &topo_info, eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache,
                                       &topo_info, eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache,
                                       &topo_info, eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache,
                                       &topo_info, eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        /* AMD processor topology; core_id must fit leaf's 8-bit field. */
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(&topo_info, cpu, eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        /* Maximum Centaur/VIA leaf. */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        /* AMD memory encryption (SEV) capability. */
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
5922
/* Reset the x86 CPU to its architectural power-on / RESET state:
 * real mode with CS base 0xffff0000 and EIP 0xfff0, default segment
 * descriptors, FPU/SSE/XSAVE init values, cleared debug and MTRR
 * state.  Accelerator-specific vCPU reset is forwarded at the end.
 */
static void x86_cpu_reset(DeviceState *dev)
{
    CPUState *s = CPU(dev);
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(dev);

    /* Zero everything up to (not including) the fields that must
     * survive reset (see end_reset_fields in CPUX86State).
     */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 reset value: ET/NE bits plus CD/NW set (caches disabled). */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Segment registers: CS points at the reset vector segment, the
     * data segments are flat 16-bit real-mode segments at base 0.
     */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
    if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
        env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
    }

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Application processors start halted, waiting for SIPI. */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}
6058
6059 #ifndef CONFIG_USER_ONLY
6060 bool cpu_is_bsp(X86CPU *cpu)
6061 {
6062 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
6063 }
6064
/* TODO: remove me, when reset over QOM tree is implemented */
/* Machine-reset hook: forwards a system reset to this CPU. */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
6071 #endif
6072
6073 static void mce_init(X86CPU *cpu)
6074 {
6075 CPUX86State *cenv = &cpu->env;
6076 unsigned int bank;
6077
6078 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
6079 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
6080 (CPUID_MCE | CPUID_MCA)) {
6081 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
6082 (cpu->enable_lmce ? MCG_LMCE_P : 0);
6083 cenv->mcg_ctl = ~(uint64_t)0;
6084 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
6085 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
6086 }
6087 }
6088 }
6089
6090 #ifndef CONFIG_USER_ONLY
6091 APICCommonClass *apic_get_class(void)
6092 {
6093 const char *apic_type = "apic";
6094
6095 /* TODO: in-kernel irqchip for hvf */
6096 if (kvm_apic_in_kernel()) {
6097 apic_type = "kvm-apic";
6098 } else if (xen_enabled()) {
6099 apic_type = "xen-apic";
6100 }
6101
6102 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
6103 }
6104
/* Create the local APIC device for @cpu and attach it as the
 * "lapic" child property.  The APIC is created but not realized
 * here; see x86_cpu_apic_realize().
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new_with_class(apic_class));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state));
    /* The child property now holds the only long-lived reference. */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
6122
/* Realize the CPU's local APIC (if one was created) and map the APIC
 * MMIO region into system memory.  The MMIO region is shared by all
 * CPUs, so it is mapped only once, on the first CPU realized.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
     }
}
6145
/* machine-done notifier: if the machine exposes a "/machine/smram"
 * region, create a per-CPU alias of it so SMRAM is visible in this
 * CPU's address space.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        /* Alias covers the full 4GiB low address range. */
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
6160 #else
/* User-mode emulation has no APIC; nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
6164 #endif
6165
6166 /* Note: Only safe for use on x86(-64) hosts */
6167 static uint32_t x86_host_phys_bits(void)
6168 {
6169 uint32_t eax;
6170 uint32_t host_phys_bits;
6171
6172 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
6173 if (eax >= 0x80000008) {
6174 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
6175 /* Note: According to AMD doc 25481 rev 2.34 they have a field
6176 * at 23:16 that can specify a maximum physical address bits for
6177 * the guest that can override this value; but I've not seen
6178 * anything with that set.
6179 */
6180 host_phys_bits = eax & 0xff;
6181 } else {
6182 /* It's an odd 64 bit machine that doesn't have the leaf for
6183 * physical address bits; fall back to 36 that's most older
6184 * Intel.
6185 */
6186 host_phys_bits = 36;
6187 }
6188
6189 return host_phys_bits;
6190 }
6191
6192 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
6193 {
6194 if (*min < value) {
6195 *min = value;
6196 }
6197 }
6198
/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate.
 *
 * If any bit of feature word @w is set, the CPUID leaf that reports it
 * must be enumerable, so the matching minimum level (basic, extended
 * or Centaur range, chosen by the leaf's top nibble) is raised to at
 * least that leaf.  Leaf 7 additionally tracks its maximum sub-leaf
 * via cpuid_min_level_func7.
 */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid.eax;
    uint32_t region = eax & 0xF0000000;

    /* Only CPUID-backed feature words carry a leaf to adjust for. */
    assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
    if (!env->features[w]) {
        return;
    }

    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
        break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
        break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
        break;
    }

    if (eax == 7) {
        /* For leaf 7, fi->cpuid.ecx is the sub-leaf number. */
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7,
                             fi->cpuid.ecx);
    }
}
6229
6230 /* Calculate XSAVE components based on the configured CPU feature flags */
6231 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
6232 {
6233 CPUX86State *env = &cpu->env;
6234 int i;
6235 uint64_t mask;
6236
6237 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
6238 return;
6239 }
6240
6241 mask = 0;
6242 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
6243 const ExtSaveArea *esa = &x86_ext_save_areas[i];
6244 if (env->features[esa->feature] & esa->bits) {
6245 mask |= (1ULL << i);
6246 }
6247 }
6248
6249 env->features[FEAT_XSAVE_COMP_LO] = mask;
6250 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
6251 }
6252
6253 /***** Steps involved on loading and filtering CPUID data
6254 *
6255 * When initializing and realizing a CPU object, the steps
6256 * involved in setting up CPUID data are:
6257 *
6258 * 1) Loading CPU model definition (X86CPUDefinition). This is
6259 * implemented by x86_cpu_load_model() and should be completely
6260 * transparent, as it is done automatically by instance_init.
6261 * No code should need to look at X86CPUDefinition structs
6262 * outside instance_init.
6263 *
6264 * 2) CPU expansion. This is done by realize before CPUID
6265 * filtering, and will make sure host/accelerator data is
6266 * loaded for CPU models that depend on host capabilities
6267 * (e.g. "host"). Done by x86_cpu_expand_features().
6268 *
6269 * 3) CPUID filtering. This initializes extra data related to
6270 * CPUID, and checks if the host supports all capabilities
6271 * required by the CPU. Runnability of a CPU model is
6272 * determined at this step. Done by x86_cpu_filter_features().
6273 *
6274 * Some operations don't require all steps to be performed.
6275 * More precisely:
6276 *
6277 * - CPU instance creation (instance_init) will run only CPU
6278 * model loading. CPU expansion can't run at instance_init-time
6279 * because host/accelerator data may be not available yet.
6280 * - CPU realization will perform both CPU model expansion and CPUID
6281 * filtering, and return an error in case one of them fails.
6282 * - query-cpu-definitions needs to run all 3 steps. It needs
6283 * to run CPUID filtering, as the 'unavailable-features'
6284 * field is set based on the filtering results.
6285 * - The query-cpu-model-expansion QMP command only needs to run
6286 * CPU model loading and CPU expansion. It should not filter
6287 * any CPUID data based on host capabilities.
6288 */
6289
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int i;
    GList *l;
    Error *local_err = NULL;

    /* Apply the global "+feature"/"-feature" lists collected from -cpu
     * parsing.  Going through the QOM property setter also records each
     * bit in env->user_features[], so these explicit choices are not
     * overridden by the max_features pass below.
     */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w] &
                ~feature_word_info[w].no_autoenable_flags;
        }
    }

    /* Drop features whose prerequisite feature bits are absent.  The
     * dropped bits are also recorded in user_features[] so they are not
     * re-enabled by later steps.
     */
    for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
        FeatureDep *d = &feature_dependencies[i];
        if (!(env->features[d->from.index] & d->from.mask)) {
            uint64_t unavailable_features = env->features[d->to.index] & d->to.mask;

            /* Not an error unless the dependent feature was added explicitly.  */
            mark_unavailable_features(cpu, d->to.index,
                unavailable_features & env->user_features[d->to.index],
                "This feature depends on other features that were not requested");

            env->user_features[d->to.index] |= unavailable_features;
            env->features[d->to.index] &= ~unavailable_features;
        }
    }

    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        /* Raise cpuid_min_{level,xlevel,xlevel2} for every feature word
         * that has any bit enabled, so the corresponding leaf is visible.
         */
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);

        /* Intel Processor Trace requires CPUID[0x14] */
        if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) {
            if (cpu->intel_pt_auto_level) {
                x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
            } else if (cpu->env.cpuid_min_level < 0x14) {
                mark_unavailable_features(cpu, FEAT_7_0_EBX,
                    CPUID_7_0_EBX_INTEL_PT,
                    "Intel PT need CPUID leaf 0x14, please set by \"-cpu ...,+intel-pt,level=0x14\"");
            }
        }

        /* CPU topology with multi-dies support requires CPUID[0x1F] */
        if (env->nr_dies > 1) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
        }

        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }

        /* SEV requires CPUID[0x8000001F] */
        if (sev_enabled()) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level_func7 == UINT32_MAX) {
        env->cpuid_level_func7 = env->cpuid_min_level_func7;
    }
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
6417
6418 /*
6419 * Finishes initialization of CPUID data, filters CPU feature
6420 * words based on host availability of each feature.
6421 *
6422 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
6423 */
static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    const char *prefix = NULL;

    /* A non-NULL prefix makes mark_unavailable_features() warn about
     * each bit it clears; NULL keeps the filtering silent.
     */
    if (verbose) {
        prefix = accel_uses_host_cpuid()
                 ? "host doesn't support requested feature"
                 : "TCG doesn't support requested feature";
    }

    /* Clear every feature bit the host/accelerator cannot provide. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        uint64_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint64_t requested_features = env->features[w];
        uint64_t unavailable_features = requested_features & ~host_feat;
        mark_unavailable_features(cpu, w, unavailable_features, prefix);
    }

    if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
        kvm_enabled()) {
        KVMState *s = CPU(cpu)->kvm_state;
        uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
        uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
        uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
        uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
        uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);

        if (!eax_0 ||
           ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
           ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
           ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
           ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
                                           INTEL_PT_ADDR_RANGES_NUM) ||
           ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
                (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
           (ecx_0 & INTEL_PT_IP_LIP)) {
            /*
             * Processor Trace capabilities aren't configurable, so if the
             * host can't emulate the capabilities we report on
             * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
             */
            mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix);
        }
    }
}
6471
/* Device realize callback: expand and filter CPUID data, validate
 * phys-bits and cache settings, then create the APIC, address spaces
 * and vCPU thread.  Errors are reported through @errp.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    /* Models such as "host" can only work when CPUID data comes from
     * the host CPU.
     */
    if (xcc->host_cpuid_required) {
        if (!accel_uses_host_cpuid()) {
            g_autofree char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(&local_err, "CPU model '%s' requires KVM", name);
            goto out;
        }
    }

    if (cpu->max_features && accel_uses_host_cpuid()) {
        if (enable_cpu_pm) {
            /* Import the host's MWAIT leaf so the guest sees real
             * MONITOR/MWAIT parameters.
             */
            host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
                       &cpu->mwait.ecx, &cpu->mwait.edx);
            env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
        }
        if (kvm_enabled() && cpu->ucode_rev == 0) {
            cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state,
                                                                MSR_IA32_UCODE_REV);
        }
    }

    if (cpu->ucode_rev == 0) {
        /* The default is the same as KVM's.  */
        if (IS_AMD_CPU(env)) {
            cpu->ucode_rev = 0x01000065;
        } else {
            cpu->ucode_rev = 0x100000000ULL;
        }
    }

    /* mwait extended info: needed for Core compatibility */
    /* We always wake on interrupt even if host does not have the capability */
    cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Verbose only when check/enforce was requested on the command line. */
    x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid);

    if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) {
        error_setg(&local_err,
                   accel_uses_host_cpuid() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
                                              & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
                if (cpu->host_phys_bits_limit &&
                    cpu->phys_bits > cpu->host_phys_bits_limit) {
                    cpu->phys_bits = cpu->host_phys_bits_limit;
                }
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                 cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        if (!xcc->model || !xcc->model->cpudef->cache_info) {
            g_autofree char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->model->cpudef->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }


    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
        warn_report("This family of AMD CPU doesn't support "
                    "hyperthreading(%d)",
                    cs->nr_threads);
        error_printf("Please configure -smp options properly"
                     " or try enabling topoext feature.\n");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
6724
6725 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
6726 {
6727 X86CPU *cpu = X86_CPU(dev);
6728 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
6729 Error *local_err = NULL;
6730
6731 #ifndef CONFIG_USER_ONLY
6732 cpu_remove_sync(CPU(dev));
6733 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
6734 #endif
6735
6736 if (cpu->apic_state) {
6737 object_unparent(OBJECT(cpu->apic_state));
6738 cpu->apic_state = NULL;
6739 }
6740
6741 xcc->parent_unrealize(dev, &local_err);
6742 if (local_err != NULL) {
6743 error_propagate(errp, local_err);
6744 return;
6745 }
6746 }
6747
/* Per-property state for the feature-bit QOM properties: which feature
 * word the bit(s) live in, and the mask of bits the property controls.
 */
typedef struct BitProperty {
    FeatureWord w;    /* index into env->features[] */
    uint64_t mask;    /* bit(s) toggled by this property */
} BitProperty;
6752
6753 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
6754 void *opaque, Error **errp)
6755 {
6756 X86CPU *cpu = X86_CPU(obj);
6757 BitProperty *fp = opaque;
6758 uint64_t f = cpu->env.features[fp->w];
6759 bool value = (f & fp->mask) == fp->mask;
6760 visit_type_bool(v, name, &value, errp);
6761 }
6762
6763 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
6764 void *opaque, Error **errp)
6765 {
6766 DeviceState *dev = DEVICE(obj);
6767 X86CPU *cpu = X86_CPU(obj);
6768 BitProperty *fp = opaque;
6769 Error *local_err = NULL;
6770 bool value;
6771
6772 if (dev->realized) {
6773 qdev_prop_set_after_realize(dev, name, errp);
6774 return;
6775 }
6776
6777 visit_type_bool(v, name, &value, &local_err);
6778 if (local_err) {
6779 error_propagate(errp, local_err);
6780 return;
6781 }
6782
6783 if (value) {
6784 cpu->env.features[fp->w] |= fp->mask;
6785 } else {
6786 cpu->env.features[fp->w] &= ~fp->mask;
6787 }
6788 cpu->env.user_features[fp->w] |= fp->mask;
6789 }
6790
6791 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
6792 void *opaque)
6793 {
6794 BitProperty *prop = opaque;
6795 g_free(prop);
6796 }
6797
6798 /* Register a boolean property to get/set a single bit in a uint32_t field.
6799 *
6800 * The same property name can be registered multiple times to make it affect
6801 * multiple bits in the same FeatureWord. In that case, the getter will return
6802 * true only if all bits are set.
6803 */
6804 static void x86_cpu_register_bit_prop(X86CPU *cpu,
6805 const char *prop_name,
6806 FeatureWord w,
6807 int bitnr)
6808 {
6809 BitProperty *fp;
6810 ObjectProperty *op;
6811 uint64_t mask = (1ULL << bitnr);
6812
6813 op = object_property_find(OBJECT(cpu), prop_name, NULL);
6814 if (op) {
6815 fp = op->opaque;
6816 assert(fp->w == w);
6817 fp->mask |= mask;
6818 } else {
6819 fp = g_new0(BitProperty, 1);
6820 fp->w = w;
6821 fp->mask = mask;
6822 object_property_add(OBJECT(cpu), prop_name, "bool",
6823 x86_cpu_get_bit_prop,
6824 x86_cpu_set_bit_prop,
6825 x86_cpu_release_bit_prop, fp);
6826 }
6827 }
6828
6829 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
6830 FeatureWord w,
6831 int bitnr)
6832 {
6833 FeatureWordInfo *fi = &feature_word_info[w];
6834 const char *name = fi->feat_names[bitnr];
6835
6836 if (!name) {
6837 return;
6838 }
6839
6840 /* Property names should use "-" instead of "_".
6841 * Old names containing underscores are registered as aliases
6842 * using object_property_add_alias()
6843 */
6844 assert(!strchr(name, '_'));
6845 /* aliases don't use "|" delimiters anymore, they are registered
6846 * manually using object_property_add_alias() */
6847 assert(!strchr(name, '|'));
6848 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
6849 }
6850
6851 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
6852 {
6853 X86CPU *cpu = X86_CPU(cs);
6854 CPUX86State *env = &cpu->env;
6855 GuestPanicInformation *panic_info = NULL;
6856
6857 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
6858 panic_info = g_malloc0(sizeof(GuestPanicInformation));
6859
6860 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
6861
6862 assert(HV_CRASH_PARAMS >= 5);
6863 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
6864 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
6865 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
6866 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
6867 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
6868 }
6869
6870 return panic_info;
6871 }
6872 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
6873 const char *name, void *opaque,
6874 Error **errp)
6875 {
6876 CPUState *cs = CPU(obj);
6877 GuestPanicInformation *panic_info;
6878
6879 if (!cs->crash_occurred) {
6880 error_setg(errp, "No crash occured");
6881 return;
6882 }
6883
6884 panic_info = x86_cpu_get_crash_info(cs);
6885 if (panic_info == NULL) {
6886 error_setg(errp, "No crash information");
6887 return;
6888 }
6889
6890 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
6891 errp);
6892 qapi_free_GuestPanicInformation(panic_info);
6893 }
6894
/* Instance init: set topology defaults, register all QOM properties
 * (CPUID version fields, feature words, feature-bit booleans and their
 * legacy aliases), then load the class's CPU model definition.
 */
static void x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    env->nr_dies = 1;
    env->nr_nodes = 1;
    cpu_set_cpustate_pointers(cpu);

    /* CPUID version and identification properties. */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL);
    /* Read-only views of the enabled and filtered feature words. */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features);
    /*
     * The "unavailable-features" property has the same semantics as
     * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
     * QMP command: they list the features that would have prevented the
     * CPU from running if the "enforce" flag was set.
     */
    object_property_add(obj, "unavailable-features", "strList",
                        x86_cpu_get_unavailable_features,
                        NULL, NULL, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL);

    /* One boolean property per named bit of every feature word. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 64; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Legacy spellings kept for command-line compatibility. */
    object_property_add_alias(obj, "sse3", obj, "pni");
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq");
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1");
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2");
    object_property_add_alias(obj, "xd", obj, "nx");
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt");
    object_property_add_alias(obj, "i64", obj, "lm");

    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl");
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust");
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt");
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm");
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy");
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr");
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core");
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb");
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay");
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu");
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf");
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time");
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi");
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt");
    object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control");
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock");
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save");
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale");
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean");
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter");
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1");
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2");

    if (xcc->model) {
        x86_cpu_load_model(cpu, xcc->model, &error_abort);
    }
}
6986
6987 static int64_t x86_cpu_get_arch_id(CPUState *cs)
6988 {
6989 X86CPU *cpu = X86_CPU(cs);
6990
6991 return cpu->apic_id;
6992 }
6993
6994 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
6995 {
6996 X86CPU *cpu = X86_CPU(cs);
6997
6998 return cpu->env.cr[0] & CR0_PG_MASK;
6999 }
7000
7001 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
7002 {
7003 X86CPU *cpu = X86_CPU(cs);
7004
7005 cpu->env.eip = value;
7006 }
7007
7008 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
7009 {
7010 X86CPU *cpu = X86_CPU(cs);
7011
7012 cpu->env.eip = tb->pc - tb->cs_base;
7013 }
7014
/* Return the single interrupt type from @interrupt_request that should
 * be serviced next, honoring the current masking state, or 0 if nothing
 * is deliverable.  The order of the checks defines the priority.
 */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    /* The remaining types are all gated on GIF being set. */
    if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            /* SMI is blocked while already in SMM. */
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            /* External interrupts: either V_INTR_MASKING with the host
             * interrupt flag set, or the normal IF/interrupt-shadow check.
             */
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}
7056
7057 static bool x86_cpu_has_work(CPUState *cs)
7058 {
7059 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
7060 }
7061
7062 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
7063 {
7064 X86CPU *cpu = X86_CPU(cs);
7065 CPUX86State *env = &cpu->env;
7066
7067 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
7068 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
7069 : bfd_mach_i386_i8086);
7070 info->print_insn = print_insn_i386;
7071
7072 info->cap_arch = CS_ARCH_X86;
7073 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
7074 : env->hflags & HF_CS32_MASK ? CS_MODE_32
7075 : CS_MODE_16);
7076 info->cap_insn_unit = 1;
7077 info->cap_insn_split = 8;
7078 }
7079
/* Recompute env->hflags from the architectural register state:
 * segment registers, CR0, CR4, EFER and EFLAGS.
 */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    /* Keep flags not derived here, recompute everything else. */
    hflags = env->hflags & HFLAG_COPY_MASK;
    /* CPL comes from the DPL field of SS. */
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    /* 64-bit code segment: LMA set and CS.L set. */
    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        /* Derive CS/SS operand size from the segment D/B bits. */
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            /* Real mode, vm86 mode or 16-bit code: always add segments. */
            hflags |= HF_ADDSEG_MASK;
        } else {
            /* ADDSEG is needed only when DS/ES/SS have non-zero bases. */
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}
7121
7122 static Property x86_cpu_properties[] = {
7123 #ifdef CONFIG_USER_ONLY
7124 /* apic_id = 0 by default for *-user, see commit 9886e834 */
7125 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
7126 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
7127 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
7128 DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
7129 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
7130 #else
7131 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
7132 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
7133 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
7134 DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
7135 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
7136 #endif
7137 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
7138 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
7139
7140 DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
7141 HYPERV_SPINLOCK_NEVER_RETRY),
7142 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
7143 HYPERV_FEAT_RELAXED, 0),
7144 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
7145 HYPERV_FEAT_VAPIC, 0),
7146 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
7147 HYPERV_FEAT_TIME, 0),
7148 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
7149 HYPERV_FEAT_CRASH, 0),
7150 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
7151 HYPERV_FEAT_RESET, 0),
7152 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
7153 HYPERV_FEAT_VPINDEX, 0),
7154 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
7155 HYPERV_FEAT_RUNTIME, 0),
7156 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
7157 HYPERV_FEAT_SYNIC, 0),
7158 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
7159 HYPERV_FEAT_STIMER, 0),
7160 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
7161 HYPERV_FEAT_FREQUENCIES, 0),
7162 DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
7163 HYPERV_FEAT_REENLIGHTENMENT, 0),
7164 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
7165 HYPERV_FEAT_TLBFLUSH, 0),
7166 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
7167 HYPERV_FEAT_EVMCS, 0),
7168 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
7169 HYPERV_FEAT_IPI, 0),
7170 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
7171 HYPERV_FEAT_STIMER_DIRECT, 0),
7172 DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU,
7173 hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF),
7174 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
7175
7176 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
7177 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
7178 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
7179 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
7180 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
7181 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
7182 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
7183 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
7184 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7,
7185 UINT32_MAX),
7186 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
7187 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
7188 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
7189 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
7190 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
7191 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
7192 DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0),
7193 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
7194 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
7195 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
7196 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
7197 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
7198 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
7199 false),
7200 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
7201 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
7202 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
7203 true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
7208 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
7209
7210 /*
7211 * From "Requirements for Implementing the Microsoft
7212 * Hypervisor Interface":
7213 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
7214 *
7215 * "Starting with Windows Server 2012 and Windows 8, if
7216 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
7217 * the hypervisor imposes no specific limit to the number of VPs.
7218 * In this case, Windows Server 2012 guest VMs may use more than
7219 * 64 VPs, up to the maximum supported number of processors applicable
7220 * to the specific Windows version being used."
7221 */
7222 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
7223 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
7224 false),
7225 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
7226 true),
7227 DEFINE_PROP_END_OF_LIST()
7228 };
7229
7230 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
7231 {
7232 X86CPUClass *xcc = X86_CPU_CLASS(oc);
7233 CPUClass *cc = CPU_CLASS(oc);
7234 DeviceClass *dc = DEVICE_CLASS(oc);
7235
7236 device_class_set_parent_realize(dc, x86_cpu_realizefn,
7237 &xcc->parent_realize);
7238 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
7239 &xcc->parent_unrealize);
7240 device_class_set_props(dc, x86_cpu_properties);
7241
7242 device_class_set_parent_reset(dc, x86_cpu_reset, &xcc->parent_reset);
7243 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
7244
7245 cc->class_by_name = x86_cpu_class_by_name;
7246 cc->parse_features = x86_cpu_parse_featurestr;
7247 cc->has_work = x86_cpu_has_work;
7248 #ifdef CONFIG_TCG
7249 cc->do_interrupt = x86_cpu_do_interrupt;
7250 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
7251 #endif
7252 cc->dump_state = x86_cpu_dump_state;
7253 cc->get_crash_info = x86_cpu_get_crash_info;
7254 cc->set_pc = x86_cpu_set_pc;
7255 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
7256 cc->gdb_read_register = x86_cpu_gdb_read_register;
7257 cc->gdb_write_register = x86_cpu_gdb_write_register;
7258 cc->get_arch_id = x86_cpu_get_arch_id;
7259 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
7260 #ifndef CONFIG_USER_ONLY
7261 cc->asidx_from_attrs = x86_asidx_from_attrs;
7262 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
7263 cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug;
7264 cc->write_elf64_note = x86_cpu_write_elf64_note;
7265 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
7266 cc->write_elf32_note = x86_cpu_write_elf32_note;
7267 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
7268 cc->vmsd = &vmstate_x86_cpu;
7269 #endif
7270 cc->gdb_arch_name = x86_gdb_arch_name;
7271 #ifdef TARGET_X86_64
7272 cc->gdb_core_xml_file = "i386-64bit.xml";
7273 cc->gdb_num_core_regs = 66;
7274 #else
7275 cc->gdb_core_xml_file = "i386-32bit.xml";
7276 cc->gdb_num_core_regs = 50;
7277 #endif
7278 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
7279 cc->debug_excp_handler = breakpoint_handler;
7280 #endif
7281 cc->cpu_exec_enter = x86_cpu_exec_enter;
7282 cc->cpu_exec_exit = x86_cpu_exec_exit;
7283 #ifdef CONFIG_TCG
7284 cc->tcg_initialize = tcg_x86_init;
7285 cc->tlb_fill = x86_cpu_tlb_fill;
7286 #endif
7287 cc->disas_set_info = x86_disas_set_info;
7288
7289 dc->user_creatable = true;
7290 }
7291
/*
 * Abstract base QOM type for every x86 CPU model; the concrete model
 * subtypes ("base", "max", the builtin_x86_defs models and, when
 * available, "host") are registered under it in
 * x86_cpu_register_types() below.
 */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,   /* never instantiated directly, only via subtypes */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
7301
7302
7303 /* "base" CPU model, used by query-cpu-model-expansion */
7304 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
7305 {
7306 X86CPUClass *xcc = X86_CPU_CLASS(oc);
7307
7308 xcc->static_model = true;
7309 xcc->migration_safe = true;
7310 xcc->model_description = "base CPU model type with no features enabled";
7311 xcc->ordering = 8;
7312 }
7313
/* QOM registration for the "base" CPU model type. */
static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};
7319
7320 static void x86_cpu_register_types(void)
7321 {
7322 int i;
7323
7324 type_register_static(&x86_cpu_type_info);
7325 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
7326 x86_register_cpudef_types(&builtin_x86_defs[i]);
7327 }
7328 type_register_static(&max_x86_cpu_type_info);
7329 type_register_static(&x86_base_cpu_type_info);
7330 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
7331 type_register_static(&host_x86_cpu_type_info);
7332 #endif
7333 }
7334
7335 type_init(x86_cpu_register_types)