[mirror_qemu.git] target/i386/cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24 #include "qemu/qemu-print.h"
25
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/reset.h"
30 #include "sysemu/hvf.h"
31 #include "sysemu/cpus.h"
32 #include "kvm_i386.h"
33 #include "sev_i386.h"
34
35 #include "qemu/error-report.h"
36 #include "qemu/module.h"
37 #include "qemu/option.h"
38 #include "qemu/config-file.h"
39 #include "qapi/error.h"
40 #include "qapi/qapi-visit-machine.h"
41 #include "qapi/qapi-visit-run-state.h"
42 #include "qapi/qmp/qdict.h"
43 #include "qapi/qmp/qerror.h"
44 #include "qapi/visitor.h"
45 #include "qom/qom-qobject.h"
46 #include "sysemu/arch_init.h"
47 #include "qapi/qapi-commands-machine-target.h"
48
49 #include "standard-headers/asm-x86/kvm_para.h"
50
51 #include "sysemu/sysemu.h"
52 #include "sysemu/tcg.h"
53 #include "hw/qdev-properties.h"
54 #include "hw/i386/topology.h"
55 #ifndef CONFIG_USER_ONLY
56 #include "exec/address-spaces.h"
57 #include "hw/xen/xen.h"
58 #include "hw/i386/apic_internal.h"
59 #include "hw/boards.h"
60 #endif
61
62 #include "disas/capstone.h"
63
64 /* Helpers for building CPUID[2] descriptors: */
65
66 struct CPUID2CacheDescriptorInfo {
67 enum CacheType type;
68 int level;
69 int size;
70 int line_size;
71 int associativity;
72 };
73
74 /*
75 * Known CPUID 2 cache descriptors.
76 * From Intel SDM Volume 2A, CPUID instruction
77 */
78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
80 .associativity = 4, .line_size = 32, },
81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
82 .associativity = 4, .line_size = 32, },
83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
84 .associativity = 4, .line_size = 64, },
85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
86 .associativity = 2, .line_size = 32, },
87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
88 .associativity = 4, .line_size = 32, },
89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
90 .associativity = 4, .line_size = 64, },
91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
92 .associativity = 6, .line_size = 64, },
93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
94 .associativity = 2, .line_size = 64, },
95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
96 .associativity = 8, .line_size = 64, },
97 /* lines per sector is not supported by cpuid2_cache_descriptor(),
98 * so descriptors 0x22, 0x23 are not included
99 */
100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
101 .associativity = 16, .line_size = 64, },
102 /* lines per sector is not supported by cpuid2_cache_descriptor(),
103 * so descriptors 0x25, 0x20 are not included
104 */
105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
106 .associativity = 8, .line_size = 64, },
107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
108 .associativity = 8, .line_size = 64, },
109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
112 .associativity = 4, .line_size = 32, },
113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
114 .associativity = 4, .line_size = 32, },
115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
116 .associativity = 4, .line_size = 32, },
117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
118 .associativity = 4, .line_size = 32, },
119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
120 .associativity = 4, .line_size = 64, },
121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
122 .associativity = 8, .line_size = 64, },
123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
124 .associativity = 12, .line_size = 64, },
125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
131 .associativity = 12, .line_size = 64, },
132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
133 .associativity = 16, .line_size = 64, },
134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
135 .associativity = 24, .line_size = 64, },
136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
137 .associativity = 8, .line_size = 64, },
138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
141 .associativity = 4, .line_size = 64, },
142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
143 .associativity = 4, .line_size = 64, },
144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
145 .associativity = 4, .line_size = 64, },
146 /* lines per sector is not supported by cpuid2_cache_descriptor(),
147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
148 */
149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
150 .associativity = 8, .line_size = 64, },
151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
152 .associativity = 2, .line_size = 64, },
153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 64, },
155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
156 .associativity = 8, .line_size = 32, },
157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
158 .associativity = 8, .line_size = 32, },
159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
160 .associativity = 8, .line_size = 32, },
161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
162 .associativity = 8, .line_size = 32, },
163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 8, .line_size = 64, },
167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 4, .line_size = 64, },
171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 4, .line_size = 64, },
173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
176 .associativity = 8, .line_size = 64, },
177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
178 .associativity = 8, .line_size = 64, },
179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
182 .associativity = 12, .line_size = 64, },
183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
184 .associativity = 12, .line_size = 64, },
185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
188 .associativity = 16, .line_size = 64, },
189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
190 .associativity = 16, .line_size = 64, },
191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
192 .associativity = 24, .line_size = 64, },
193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
194 .associativity = 24, .line_size = 64, },
195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
196 .associativity = 24, .line_size = 64, },
197 };
198
199 /*
200 * "CPUID leaf 2 does not report cache descriptor information,
201 * use CPUID leaf 4 to query cache parameters"
202 */
203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
204
205 /*
206 * Return a CPUID 2 cache descriptor for a given cache.
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
208 */
209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
210 {
211 int i;
212
213 assert(cache->size > 0);
214 assert(cache->level > 0);
215 assert(cache->line_size > 0);
216 assert(cache->associativity > 0);
217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
219 if (d->level == cache->level && d->type == cache->type &&
220 d->size == cache->size && d->line_size == cache->line_size &&
221 d->associativity == cache->associativity) {
222 return i;
223 }
224 }
225
226 return CACHE_DESCRIPTOR_UNAVAILABLE;
227 }
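/*
 * Illustrative example (editorial note, not part of the upstream file):
 * the legacy 32 KiB, 8-way, 64-byte-line L1D cache defined further down
 * matches table entry 0x2C above, so
 *
 *     cpuid2_cache_descriptor(&legacy_l1d_cache) == 0x2C
 *
 * while any geometry without a matching entry yields 0xFF
 * (CACHE_DESCRIPTOR_UNAVAILABLE).
 */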
228
229 /* CPUID Leaf 4 constants: */
230
231 /* EAX: */
232 #define CACHE_TYPE_D 1
233 #define CACHE_TYPE_I 2
234 #define CACHE_TYPE_UNIFIED 3
235
236 #define CACHE_LEVEL(l) (l << 5)
237
238 #define CACHE_SELF_INIT_LEVEL (1 << 8)
239
240 /* EDX: */
241 #define CACHE_NO_INVD_SHARING (1 << 0)
242 #define CACHE_INCLUSIVE (1 << 1)
243 #define CACHE_COMPLEX_IDX (1 << 2)
244
245 /* Encode CacheType for CPUID[4].EAX */
246 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
247 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
248 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
249 0 /* Invalid value */)
250
251
252 /* Encode cache info for CPUID[4] */
253 static void encode_cache_cpuid4(CPUCacheInfo *cache,
254 int num_apic_ids, int num_cores,
255 uint32_t *eax, uint32_t *ebx,
256 uint32_t *ecx, uint32_t *edx)
257 {
258 assert(cache->size == cache->line_size * cache->associativity *
259 cache->partitions * cache->sets);
260
261 assert(num_apic_ids > 0);
262 *eax = CACHE_TYPE(cache->type) |
263 CACHE_LEVEL(cache->level) |
264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
265 ((num_cores - 1) << 26) |
266 ((num_apic_ids - 1) << 14);
267
268 assert(cache->line_size > 0);
269 assert(cache->partitions > 0);
270 assert(cache->associativity > 0);
271 /* We don't implement fully-associative caches */
272 assert(cache->associativity < cache->sets);
273 *ebx = (cache->line_size - 1) |
274 ((cache->partitions - 1) << 12) |
275 ((cache->associativity - 1) << 22);
276
277 assert(cache->sets > 0);
278 *ecx = cache->sets - 1;
279
280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
281 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
283 }
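/*
 * Worked example (editorial note, not part of the upstream file):
 * encoding the legacy L2 cache defined below (unified, level 2, 64-byte
 * lines, 1 partition, 16-way, 4096 sets, self-initializing) for a
 * single-core, single-thread CPU gives:
 *
 *     eax = 0x143      (type 3 | level 2 << 5 | self-init)
 *     ebx = 0x03C0003F (associativity - 1 = 15 in bits 31:22, line_size - 1 = 63)
 *     ecx = 0xFFF      (sets - 1)
 *     edx = 0x1        (no_invd_sharing)
 */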
284
285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
287 {
288 assert(cache->size % 1024 == 0);
289 assert(cache->lines_per_tag > 0);
290 assert(cache->associativity > 0);
291 assert(cache->line_size > 0);
292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
293 (cache->lines_per_tag << 8) | (cache->line_size);
294 }
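/*
 * Illustrative example (editorial note, not part of the upstream file):
 * the legacy AMD L1D cache below (64 KiB, 2-way, 1 line per tag, 64-byte
 * lines) is encoded as (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140.
 */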
295
296 #define ASSOC_FULL 0xFF
297
298 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
299 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
300 a == 2 ? 0x2 : \
301 a == 4 ? 0x4 : \
302 a == 8 ? 0x6 : \
303 a == 16 ? 0x8 : \
304 a == 32 ? 0xA : \
305 a == 48 ? 0xB : \
306 a == 64 ? 0xC : \
307 a == 96 ? 0xD : \
308 a == 128 ? 0xE : \
309 a == ASSOC_FULL ? 0xF : \
310 0 /* invalid value */)
311
312 /*
313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
314 * @l3 can be NULL.
315 */
316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
317 CPUCacheInfo *l3,
318 uint32_t *ecx, uint32_t *edx)
319 {
320 assert(l2->size % 1024 == 0);
321 assert(l2->associativity > 0);
322 assert(l2->lines_per_tag > 0);
323 assert(l2->line_size > 0);
324 *ecx = ((l2->size / 1024) << 16) |
325 (AMD_ENC_ASSOC(l2->associativity) << 12) |
326 (l2->lines_per_tag << 8) | (l2->line_size);
327
328 if (l3) {
329 assert(l3->size % (512 * 1024) == 0);
330 assert(l3->associativity > 0);
331 assert(l3->lines_per_tag > 0);
332 assert(l3->line_size > 0);
333 *edx = ((l3->size / (512 * 1024)) << 18) |
334 (AMD_ENC_ASSOC(l3->associativity) << 12) |
335 (l3->lines_per_tag << 8) | (l3->line_size);
336 } else {
337 *edx = 0;
338 }
339 }
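/*
 * Illustrative example (editorial note, not part of the upstream file):
 * for the legacy AMD L2 cache below (512 KiB, 16-way, 1 line per tag,
 * 64-byte lines) this yields
 *
 *     ecx = (512 << 16) | (AMD_ENC_ASSOC(16) << 12) | (1 << 8) | 64
 *         = 0x02008140
 */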
340
341 /* Encode cache info for CPUID[8000001D] */
342 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache,
343 X86CPUTopoInfo *topo_info,
344 uint32_t *eax, uint32_t *ebx,
345 uint32_t *ecx, uint32_t *edx)
346 {
347 uint32_t l3_cores;
348 unsigned nodes = MAX(topo_info->nodes_per_pkg, 1);
349
350 assert(cache->size == cache->line_size * cache->associativity *
351 cache->partitions * cache->sets);
352
353 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
354 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
355
356 /* L3 is shared among multiple cores */
357 if (cache->level == 3) {
358 l3_cores = DIV_ROUND_UP((topo_info->dies_per_pkg *
359 topo_info->cores_per_die *
360 topo_info->threads_per_core),
361 nodes);
362 *eax |= (l3_cores - 1) << 14;
363 } else {
364 *eax |= ((topo_info->threads_per_core - 1) << 14);
365 }
366
367 assert(cache->line_size > 0);
368 assert(cache->partitions > 0);
369 assert(cache->associativity > 0);
370 /* We don't implement fully-associative caches */
371 assert(cache->associativity < cache->sets);
372 *ebx = (cache->line_size - 1) |
373 ((cache->partitions - 1) << 12) |
374 ((cache->associativity - 1) << 22);
375
376 assert(cache->sets > 0);
377 *ecx = cache->sets - 1;
378
379 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
380 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
381 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
382 }
383
384 /* Encode cache info for CPUID[8000001E] */
385 static void encode_topo_cpuid8000001e(X86CPUTopoInfo *topo_info, X86CPU *cpu,
386 uint32_t *eax, uint32_t *ebx,
387 uint32_t *ecx, uint32_t *edx)
388 {
389 X86CPUTopoIDs topo_ids = {0};
390 unsigned long nodes = MAX(topo_info->nodes_per_pkg, 1);
391 int shift;
392
393 x86_topo_ids_from_apicid_epyc(cpu->apic_id, topo_info, &topo_ids);
394
395 *eax = cpu->apic_id;
396 /*
397 * CPUID_Fn8000001E_EBX
398 * 31:16 Reserved
399 * 15:8 Threads per core (the number of threads per core is
400 * this field + 1)
401 * 7:0 Core id (see bit decoding below)
402 * SMT:
403 * 4:3 node id
404 * 2 Core complex id
405 * 1:0 Core id
406 * Non SMT:
407 * 5:4 node id
408 * 3 Core complex id
409 * 1:0 Core id
410 */
411 *ebx = ((topo_info->threads_per_core - 1) << 8) | (topo_ids.node_id << 3) |
412 (topo_ids.core_id);
413 /*
414 * CPUID_Fn8000001E_ECX
415 * 31:11 Reserved
416 * 10:8 Nodes per processor (the number of nodes is this field + 1)
417 * 7:0 Node id (see bit decoding below)
418 * 2 Socket id
419 * 1:0 Node id
420 */
421 if (nodes <= 4) {
422 *ecx = ((nodes - 1) << 8) | (topo_ids.pkg_id << 2) | topo_ids.node_id;
423 } else {
424 /*
425 * Node id fix up. Actual hardware supports up to 4 nodes, but with
426 * more than 32 cores we may end up with more than 4 nodes.
427 * Node id is a combination of socket id and node id. The only
428 * requirement is that this number be unique across the system.
429 * Shift the socket id to accommodate more nodes. We don't expect
430 * both socket id and node id to be big numbers at the same time;
431 * this is not an ideal config, but we need to support it. Max
432 * nodes we can have is 32 (255/8) with 8 cores per node and 255
433 * max cores. We only need 5 bits for nodes. Find the leftmost set
434 * bit of the node count. find_last_bit returns the last set bit
435 * (0-based); left-shift the socket id by that + 1 to cover all nodes.
436 */
437 nodes -= 1;
438 shift = find_last_bit(&nodes, 8);
439 *ecx = (nodes << 8) | (topo_ids.pkg_id << (shift + 1)) |
440 topo_ids.node_id;
441 }
442 *edx = 0;
443 }
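/*
 * Illustrative example (editorial note, not part of the upstream file):
 * with 8 nodes per package the fix-up path above computes nodes = 7,
 * find_last_bit(7, 8) = 2, so ecx = (7 << 8) | (pkg_id << 3) | node_id.
 */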
444
445 /*
446 * Definitions of the hardcoded cache entries we expose:
447 * These are legacy cache values. If there is a need to change any
448 * of these values, please use builtin_x86_defs.
449 */
450
451 /* L1 data cache: */
452 static CPUCacheInfo legacy_l1d_cache = {
453 .type = DATA_CACHE,
454 .level = 1,
455 .size = 32 * KiB,
456 .self_init = 1,
457 .line_size = 64,
458 .associativity = 8,
459 .sets = 64,
460 .partitions = 1,
461 .no_invd_sharing = true,
462 };
463
464 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
465 static CPUCacheInfo legacy_l1d_cache_amd = {
466 .type = DATA_CACHE,
467 .level = 1,
468 .size = 64 * KiB,
469 .self_init = 1,
470 .line_size = 64,
471 .associativity = 2,
472 .sets = 512,
473 .partitions = 1,
474 .lines_per_tag = 1,
475 .no_invd_sharing = true,
476 };
477
478 /* L1 instruction cache: */
479 static CPUCacheInfo legacy_l1i_cache = {
480 .type = INSTRUCTION_CACHE,
481 .level = 1,
482 .size = 32 * KiB,
483 .self_init = 1,
484 .line_size = 64,
485 .associativity = 8,
486 .sets = 64,
487 .partitions = 1,
488 .no_invd_sharing = true,
489 };
490
491 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
492 static CPUCacheInfo legacy_l1i_cache_amd = {
493 .type = INSTRUCTION_CACHE,
494 .level = 1,
495 .size = 64 * KiB,
496 .self_init = 1,
497 .line_size = 64,
498 .associativity = 2,
499 .sets = 512,
500 .partitions = 1,
501 .lines_per_tag = 1,
502 .no_invd_sharing = true,
503 };
504
505 /* Level 2 unified cache: */
506 static CPUCacheInfo legacy_l2_cache = {
507 .type = UNIFIED_CACHE,
508 .level = 2,
509 .size = 4 * MiB,
510 .self_init = 1,
511 .line_size = 64,
512 .associativity = 16,
513 .sets = 4096,
514 .partitions = 1,
515 .no_invd_sharing = true,
516 };
517
518 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
519 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
520 .type = UNIFIED_CACHE,
521 .level = 2,
522 .size = 2 * MiB,
523 .line_size = 64,
524 .associativity = 8,
525 };
526
527
528 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
529 static CPUCacheInfo legacy_l2_cache_amd = {
530 .type = UNIFIED_CACHE,
531 .level = 2,
532 .size = 512 * KiB,
533 .line_size = 64,
534 .lines_per_tag = 1,
535 .associativity = 16,
536 .sets = 512,
537 .partitions = 1,
538 };
539
540 /* Level 3 unified cache: */
541 static CPUCacheInfo legacy_l3_cache = {
542 .type = UNIFIED_CACHE,
543 .level = 3,
544 .size = 16 * MiB,
545 .line_size = 64,
546 .associativity = 16,
547 .sets = 16384,
548 .partitions = 1,
549 .lines_per_tag = 1,
550 .self_init = true,
551 .inclusive = true,
552 .complex_indexing = true,
553 };
554
555 /* TLB definitions: */
556
557 #define L1_DTLB_2M_ASSOC 1
558 #define L1_DTLB_2M_ENTRIES 255
559 #define L1_DTLB_4K_ASSOC 1
560 #define L1_DTLB_4K_ENTRIES 255
561
562 #define L1_ITLB_2M_ASSOC 1
563 #define L1_ITLB_2M_ENTRIES 255
564 #define L1_ITLB_4K_ASSOC 1
565 #define L1_ITLB_4K_ENTRIES 255
566
567 #define L2_DTLB_2M_ASSOC 0 /* disabled */
568 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
569 #define L2_DTLB_4K_ASSOC 4
570 #define L2_DTLB_4K_ENTRIES 512
571
572 #define L2_ITLB_2M_ASSOC 0 /* disabled */
573 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
574 #define L2_ITLB_4K_ASSOC 4
575 #define L2_ITLB_4K_ENTRIES 512
576
577 /* CPUID Leaf 0x14 constants: */
578 #define INTEL_PT_MAX_SUBLEAF 0x1
579 /*
580 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
581 * MSR can be accessed;
582 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
583 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
584 * of Intel PT MSRs across warm reset;
585 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
586 */
587 #define INTEL_PT_MINIMAL_EBX 0xf
588 /*
589 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
590 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
591 * accessed;
592 * bit[01]: ToPA tables can hold any number of output entries, up to the
593 * maximum allowed by the MaskOrTableOffset field of
594 * IA32_RTIT_OUTPUT_MASK_PTRS;
595 * bit[02]: Support Single-Range Output scheme;
596 */
597 #define INTEL_PT_MINIMAL_ECX 0x7
598 /* generated packets which contain IP payloads have LIP values */
599 #define INTEL_PT_IP_LIP (1 << 31)
600 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
601 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
602 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
603 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
604 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
605
606 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
607 uint32_t vendor2, uint32_t vendor3)
608 {
609 int i;
610 for (i = 0; i < 4; i++) {
611 dst[i] = vendor1 >> (8 * i);
612 dst[i + 4] = vendor2 >> (8 * i);
613 dst[i + 8] = vendor3 >> (8 * i);
614 }
615 dst[CPUID_VENDOR_SZ] = '\0';
616 }
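/*
 * Illustrative example (editorial note, not part of the upstream file):
 * CPUID leaf 0 on an Intel part returns EBX/EDX/ECX spelling out
 * "Genu" / "ineI" / "ntel", which this helper reassembles into the
 * 12-character vendor string "GenuineIntel".
 */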
617
618 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
619 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
620 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
621 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
622 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
623 CPUID_PSE36 | CPUID_FXSR)
624 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
625 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
626 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
627 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
628 CPUID_PAE | CPUID_SEP | CPUID_APIC)
629
630 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
631 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
632 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
633 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
634 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
635 /* partly implemented:
636 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
637 /* missing:
638 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
639 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
640 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
641 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
642 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
643 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
644 CPUID_EXT_RDRAND)
645 /* missing:
646 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
647 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
648 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
649 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
650 CPUID_EXT_F16C */
651
652 #ifdef TARGET_X86_64
653 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
654 #else
655 #define TCG_EXT2_X86_64_FEATURES 0
656 #endif
657
658 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
659 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
660 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
661 TCG_EXT2_X86_64_FEATURES)
662 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
663 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
664 #define TCG_EXT4_FEATURES 0
665 #define TCG_SVM_FEATURES CPUID_SVM_NPT
666 #define TCG_KVM_FEATURES 0
667 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
668 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
669 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
670 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
671 CPUID_7_0_EBX_ERMS)
672 /* missing:
673 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
674 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
675 CPUID_7_0_EBX_RDSEED */
676 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
677 /* CPUID_7_0_ECX_OSPKE is dynamic */ \
678 CPUID_7_0_ECX_LA57)
679 #define TCG_7_0_EDX_FEATURES 0
680 #define TCG_7_1_EAX_FEATURES 0
681 #define TCG_APM_FEATURES 0
682 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
683 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
684 /* missing:
685 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
686
687 typedef enum FeatureWordType {
688 CPUID_FEATURE_WORD,
689 MSR_FEATURE_WORD,
690 } FeatureWordType;
691
692 typedef struct FeatureWordInfo {
693 FeatureWordType type;
694 /* feature flag names are taken from "Intel Processor Identification and
695 * the CPUID Instruction" and AMD's "CPUID Specification".
696 * In cases of disagreement between feature naming conventions,
697 * aliases may be added.
698 */
699 const char *feat_names[64];
700 union {
701 /* If type==CPUID_FEATURE_WORD */
702 struct {
703 uint32_t eax; /* Input EAX for CPUID */
704 bool needs_ecx; /* CPUID instruction uses ECX as input */
705 uint32_t ecx; /* Input ECX value for CPUID */
706 int reg; /* output register (R_* constant) */
707 } cpuid;
708 /* If type==MSR_FEATURE_WORD */
709 struct {
710 uint32_t index;
711 } msr;
712 };
713 uint64_t tcg_features; /* Feature flags supported by TCG */
714 uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */
715 uint64_t migratable_flags; /* Feature flags known to be migratable */
716 /* Features that shouldn't be auto-enabled by "-cpu host" */
717 uint64_t no_autoenable_flags;
718 } FeatureWordInfo;
719
720 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
721 [FEAT_1_EDX] = {
722 .type = CPUID_FEATURE_WORD,
723 .feat_names = {
724 "fpu", "vme", "de", "pse",
725 "tsc", "msr", "pae", "mce",
726 "cx8", "apic", NULL, "sep",
727 "mtrr", "pge", "mca", "cmov",
728 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
729 NULL, "ds" /* Intel dts */, "acpi", "mmx",
730 "fxsr", "sse", "sse2", "ss",
731 "ht" /* Intel htt */, "tm", "ia64", "pbe",
732 },
733 .cpuid = {.eax = 1, .reg = R_EDX, },
734 .tcg_features = TCG_FEATURES,
735 },
736 [FEAT_1_ECX] = {
737 .type = CPUID_FEATURE_WORD,
738 .feat_names = {
739 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
740 "ds-cpl", "vmx", "smx", "est",
741 "tm2", "ssse3", "cid", NULL,
742 "fma", "cx16", "xtpr", "pdcm",
743 NULL, "pcid", "dca", "sse4.1",
744 "sse4.2", "x2apic", "movbe", "popcnt",
745 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
746 "avx", "f16c", "rdrand", "hypervisor",
747 },
748 .cpuid = { .eax = 1, .reg = R_ECX, },
749 .tcg_features = TCG_EXT_FEATURES,
750 },
751 /* Feature names that are already defined in feature_name[] but
752 * are set in CPUID[8000_0001].EDX on AMD CPUs don't have their
753 * names listed in feat_names below. They are copied automatically
754 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
755 */
756 [FEAT_8000_0001_EDX] = {
757 .type = CPUID_FEATURE_WORD,
758 .feat_names = {
759 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
760 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
761 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
762 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
763 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
764 "nx", NULL, "mmxext", NULL /* mmx */,
765 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
766 NULL, "lm", "3dnowext", "3dnow",
767 },
768 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
769 .tcg_features = TCG_EXT2_FEATURES,
770 },
771 [FEAT_8000_0001_ECX] = {
772 .type = CPUID_FEATURE_WORD,
773 .feat_names = {
774 "lahf-lm", "cmp-legacy", "svm", "extapic",
775 "cr8legacy", "abm", "sse4a", "misalignsse",
776 "3dnowprefetch", "osvw", "ibs", "xop",
777 "skinit", "wdt", NULL, "lwp",
778 "fma4", "tce", NULL, "nodeid-msr",
779 NULL, "tbm", "topoext", "perfctr-core",
780 "perfctr-nb", NULL, NULL, NULL,
781 NULL, NULL, NULL, NULL,
782 },
783 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
784 .tcg_features = TCG_EXT3_FEATURES,
785 /*
786 * TOPOEXT is always allowed but can't be enabled blindly by
787 * "-cpu host", as it requires consistent cache topology info
788 * to be provided so it doesn't confuse guests.
789 */
790 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
791 },
792 [FEAT_C000_0001_EDX] = {
793 .type = CPUID_FEATURE_WORD,
794 .feat_names = {
795 NULL, NULL, "xstore", "xstore-en",
796 NULL, NULL, "xcrypt", "xcrypt-en",
797 "ace2", "ace2-en", "phe", "phe-en",
798 "pmm", "pmm-en", NULL, NULL,
799 NULL, NULL, NULL, NULL,
800 NULL, NULL, NULL, NULL,
801 NULL, NULL, NULL, NULL,
802 NULL, NULL, NULL, NULL,
803 },
804 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
805 .tcg_features = TCG_EXT4_FEATURES,
806 },
807 [FEAT_KVM] = {
808 .type = CPUID_FEATURE_WORD,
809 .feat_names = {
810 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
811 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
812 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
813 "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL,
814 NULL, NULL, NULL, NULL,
815 NULL, NULL, NULL, NULL,
816 "kvmclock-stable-bit", NULL, NULL, NULL,
817 NULL, NULL, NULL, NULL,
818 },
819 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
820 .tcg_features = TCG_KVM_FEATURES,
821 },
822 [FEAT_KVM_HINTS] = {
823 .type = CPUID_FEATURE_WORD,
824 .feat_names = {
825 "kvm-hint-dedicated", NULL, NULL, NULL,
826 NULL, NULL, NULL, NULL,
827 NULL, NULL, NULL, NULL,
828 NULL, NULL, NULL, NULL,
829 NULL, NULL, NULL, NULL,
830 NULL, NULL, NULL, NULL,
831 NULL, NULL, NULL, NULL,
832 NULL, NULL, NULL, NULL,
833 },
834 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
835 .tcg_features = TCG_KVM_FEATURES,
836 /*
837 * KVM hints aren't auto-enabled by -cpu host; they need to be
838 * explicitly enabled on the command line.
839 */
840 .no_autoenable_flags = ~0U,
841 },
842 /*
843 * .feat_names are commented out for Hyper-V enlightenments because we
844 * don't want to have two different ways of enabling them on the QEMU
845 * command line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...)
846 * require enabling several feature bits simultaneously; exposing these
847 * bits individually may just confuse guests.
848 */
849 [FEAT_HYPERV_EAX] = {
850 .type = CPUID_FEATURE_WORD,
851 .feat_names = {
852 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
853 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
854 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
855 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
856 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
857 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
858 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
859 NULL, NULL,
860 NULL, NULL, NULL, NULL,
861 NULL, NULL, NULL, NULL,
862 NULL, NULL, NULL, NULL,
863 NULL, NULL, NULL, NULL,
864 },
865 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
866 },
867 [FEAT_HYPERV_EBX] = {
868 .type = CPUID_FEATURE_WORD,
869 .feat_names = {
870 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
871 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
872 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
873 NULL /* hv_create_port */, NULL /* hv_connect_port */,
874 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
875 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
876 NULL, NULL,
877 NULL, NULL, NULL, NULL,
878 NULL, NULL, NULL, NULL,
879 NULL, NULL, NULL, NULL,
880 NULL, NULL, NULL, NULL,
881 },
882 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
883 },
884 [FEAT_HYPERV_EDX] = {
885 .type = CPUID_FEATURE_WORD,
886 .feat_names = {
887 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
888 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
889 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
890 NULL, NULL,
891 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
892 NULL, NULL, NULL, NULL,
893 NULL, NULL, NULL, NULL,
894 NULL, NULL, NULL, NULL,
895 NULL, NULL, NULL, NULL,
896 NULL, NULL, NULL, NULL,
897 },
898 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
899 },
900 [FEAT_HV_RECOMM_EAX] = {
901 .type = CPUID_FEATURE_WORD,
902 .feat_names = {
903 NULL /* hv_recommend_pv_as_switch */,
904 NULL /* hv_recommend_pv_tlbflush_local */,
905 NULL /* hv_recommend_pv_tlbflush_remote */,
906 NULL /* hv_recommend_msr_apic_access */,
907 NULL /* hv_recommend_msr_reset */,
908 NULL /* hv_recommend_relaxed_timing */,
909 NULL /* hv_recommend_dma_remapping */,
910 NULL /* hv_recommend_int_remapping */,
911 NULL /* hv_recommend_x2apic_msrs */,
912 NULL /* hv_recommend_autoeoi_deprecation */,
913 NULL /* hv_recommend_pv_ipi */,
914 NULL /* hv_recommend_ex_hypercalls */,
915 NULL /* hv_hypervisor_is_nested */,
916 NULL /* hv_recommend_int_mbec */,
917 NULL /* hv_recommend_evmcs */,
918 NULL,
919 NULL, NULL, NULL, NULL,
920 NULL, NULL, NULL, NULL,
921 NULL, NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 },
924 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
925 },
926 [FEAT_HV_NESTED_EAX] = {
927 .type = CPUID_FEATURE_WORD,
928 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
929 },
930 [FEAT_SVM] = {
931 .type = CPUID_FEATURE_WORD,
932 .feat_names = {
933 "npt", "lbrv", "svm-lock", "nrip-save",
934 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
935 NULL, NULL, "pause-filter", NULL,
936 "pfthreshold", NULL, NULL, NULL,
937 NULL, NULL, NULL, NULL,
938 NULL, NULL, NULL, NULL,
939 NULL, NULL, NULL, NULL,
940 NULL, NULL, NULL, NULL,
941 },
942 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
943 .tcg_features = TCG_SVM_FEATURES,
944 },
945 [FEAT_7_0_EBX] = {
946 .type = CPUID_FEATURE_WORD,
947 .feat_names = {
948 "fsgsbase", "tsc-adjust", NULL, "bmi1",
949 "hle", "avx2", NULL, "smep",
950 "bmi2", "erms", "invpcid", "rtm",
951 NULL, NULL, "mpx", NULL,
952 "avx512f", "avx512dq", "rdseed", "adx",
953 "smap", "avx512ifma", "pcommit", "clflushopt",
954 "clwb", "intel-pt", "avx512pf", "avx512er",
955 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
956 },
957 .cpuid = {
958 .eax = 7,
959 .needs_ecx = true, .ecx = 0,
960 .reg = R_EBX,
961 },
962 .tcg_features = TCG_7_0_EBX_FEATURES,
963 },
964 [FEAT_7_0_ECX] = {
965 .type = CPUID_FEATURE_WORD,
966 .feat_names = {
967 NULL, "avx512vbmi", "umip", "pku",
968 NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL,
969 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
970 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
971 "la57", NULL, NULL, NULL,
972 NULL, NULL, "rdpid", NULL,
973 NULL, "cldemote", NULL, "movdiri",
974 "movdir64b", NULL, NULL, NULL,
975 },
976 .cpuid = {
977 .eax = 7,
978 .needs_ecx = true, .ecx = 0,
979 .reg = R_ECX,
980 },
981 .tcg_features = TCG_7_0_ECX_FEATURES,
982 },
983 [FEAT_7_0_EDX] = {
984 .type = CPUID_FEATURE_WORD,
985 .feat_names = {
986 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
987 NULL, NULL, NULL, NULL,
988 NULL, NULL, "md-clear", NULL,
989 NULL, NULL, NULL, NULL,
990 NULL, NULL, NULL /* pconfig */, NULL,
991 NULL, NULL, NULL, NULL,
992 NULL, NULL, "spec-ctrl", "stibp",
993 NULL, "arch-capabilities", "core-capability", "ssbd",
994 },
995 .cpuid = {
996 .eax = 7,
997 .needs_ecx = true, .ecx = 0,
998 .reg = R_EDX,
999 },
1000 .tcg_features = TCG_7_0_EDX_FEATURES,
1001 },
1002 [FEAT_7_1_EAX] = {
1003 .type = CPUID_FEATURE_WORD,
1004 .feat_names = {
1005 NULL, NULL, NULL, NULL,
1006 NULL, "avx512-bf16", NULL, NULL,
1007 NULL, NULL, NULL, NULL,
1008 NULL, NULL, NULL, NULL,
1009 NULL, NULL, NULL, NULL,
1010 NULL, NULL, NULL, NULL,
1011 NULL, NULL, NULL, NULL,
1012 NULL, NULL, NULL, NULL,
1013 },
1014 .cpuid = {
1015 .eax = 7,
1016 .needs_ecx = true, .ecx = 1,
1017 .reg = R_EAX,
1018 },
1019 .tcg_features = TCG_7_1_EAX_FEATURES,
1020 },
1021 [FEAT_8000_0007_EDX] = {
1022 .type = CPUID_FEATURE_WORD,
1023 .feat_names = {
1024 NULL, NULL, NULL, NULL,
1025 NULL, NULL, NULL, NULL,
1026 "invtsc", NULL, NULL, NULL,
1027 NULL, NULL, NULL, NULL,
1028 NULL, NULL, NULL, NULL,
1029 NULL, NULL, NULL, NULL,
1030 NULL, NULL, NULL, NULL,
1031 NULL, NULL, NULL, NULL,
1032 },
1033 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1034 .tcg_features = TCG_APM_FEATURES,
1035 .unmigratable_flags = CPUID_APM_INVTSC,
1036 },
1037 [FEAT_8000_0008_EBX] = {
1038 .type = CPUID_FEATURE_WORD,
1039 .feat_names = {
1040 "clzero", NULL, "xsaveerptr", NULL,
1041 NULL, NULL, NULL, NULL,
1042 NULL, "wbnoinvd", NULL, NULL,
1043 "ibpb", NULL, NULL, "amd-stibp",
1044 NULL, NULL, NULL, NULL,
1045 NULL, NULL, NULL, NULL,
1046 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1047 NULL, NULL, NULL, NULL,
1048 },
1049 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1050 .tcg_features = 0,
1051 .unmigratable_flags = 0,
1052 },
1053 [FEAT_XSAVE] = {
1054 .type = CPUID_FEATURE_WORD,
1055 .feat_names = {
1056 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1057 NULL, NULL, NULL, NULL,
1058 NULL, NULL, NULL, NULL,
1059 NULL, NULL, NULL, NULL,
1060 NULL, NULL, NULL, NULL,
1061 NULL, NULL, NULL, NULL,
1062 NULL, NULL, NULL, NULL,
1063 NULL, NULL, NULL, NULL,
1064 },
1065 .cpuid = {
1066 .eax = 0xd,
1067 .needs_ecx = true, .ecx = 1,
1068 .reg = R_EAX,
1069 },
1070 .tcg_features = TCG_XSAVE_FEATURES,
1071 },
1072 [FEAT_6_EAX] = {
1073 .type = CPUID_FEATURE_WORD,
1074 .feat_names = {
1075 NULL, NULL, "arat", NULL,
1076 NULL, NULL, NULL, NULL,
1077 NULL, NULL, NULL, NULL,
1078 NULL, NULL, NULL, NULL,
1079 NULL, NULL, NULL, NULL,
1080 NULL, NULL, NULL, NULL,
1081 NULL, NULL, NULL, NULL,
1082 NULL, NULL, NULL, NULL,
1083 },
1084 .cpuid = { .eax = 6, .reg = R_EAX, },
1085 .tcg_features = TCG_6_EAX_FEATURES,
1086 },
1087 [FEAT_XSAVE_COMP_LO] = {
1088 .type = CPUID_FEATURE_WORD,
1089 .cpuid = {
1090 .eax = 0xD,
1091 .needs_ecx = true, .ecx = 0,
1092 .reg = R_EAX,
1093 },
1094 .tcg_features = ~0U,
1095 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1096 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1097 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1098 XSTATE_PKRU_MASK,
1099 },
1100 [FEAT_XSAVE_COMP_HI] = {
1101 .type = CPUID_FEATURE_WORD,
1102 .cpuid = {
1103 .eax = 0xD,
1104 .needs_ecx = true, .ecx = 0,
1105 .reg = R_EDX,
1106 },
1107 .tcg_features = ~0U,
1108 },
1109 /* Below are MSR-exposed features */
1110 [FEAT_ARCH_CAPABILITIES] = {
1111 .type = MSR_FEATURE_WORD,
1112 .feat_names = {
1113 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1114 "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl",
1115 "taa-no", NULL, NULL, NULL,
1116 NULL, NULL, NULL, NULL,
1117 NULL, NULL, NULL, NULL,
1118 NULL, NULL, NULL, NULL,
1119 NULL, NULL, NULL, NULL,
1120 NULL, NULL, NULL, NULL,
1121 },
1122 .msr = {
1123 .index = MSR_IA32_ARCH_CAPABILITIES,
1124 },
1125 },
1126 [FEAT_CORE_CAPABILITY] = {
1127 .type = MSR_FEATURE_WORD,
1128 .feat_names = {
1129 NULL, NULL, NULL, NULL,
1130 NULL, "split-lock-detect", NULL, NULL,
1131 NULL, NULL, NULL, NULL,
1132 NULL, NULL, NULL, NULL,
1133 NULL, NULL, NULL, NULL,
1134 NULL, NULL, NULL, NULL,
1135 NULL, NULL, NULL, NULL,
1136 NULL, NULL, NULL, NULL,
1137 },
1138 .msr = {
1139 .index = MSR_IA32_CORE_CAPABILITY,
1140 },
1141 },
1142
1143 [FEAT_VMX_PROCBASED_CTLS] = {
1144 .type = MSR_FEATURE_WORD,
1145 .feat_names = {
1146 NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset",
1147 NULL, NULL, NULL, "vmx-hlt-exit",
1148 NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit",
1149 "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit",
1150 "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit",
1151 "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit",
1152 "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf",
1153 "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls",
1154 },
1155 .msr = {
1156 .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
1157 }
1158 },
1159
1160 [FEAT_VMX_SECONDARY_CTLS] = {
1161 .type = MSR_FEATURE_WORD,
1162 .feat_names = {
1163 "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit",
1164 "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest",
1165 "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit",
1166 "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit",
1167 "vmx-rdseed-exit", "vmx-pml", NULL, NULL,
1168 "vmx-xsaves", NULL, NULL, NULL,
1169 NULL, NULL, NULL, NULL,
1170 NULL, NULL, NULL, NULL,
1171 },
1172 .msr = {
1173 .index = MSR_IA32_VMX_PROCBASED_CTLS2,
1174 }
1175 },
1176
1177 [FEAT_VMX_PINBASED_CTLS] = {
1178 .type = MSR_FEATURE_WORD,
1179 .feat_names = {
1180 "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit",
1181 NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr",
1182 NULL, NULL, NULL, NULL,
1183 NULL, NULL, NULL, NULL,
1184 NULL, NULL, NULL, NULL,
1185 NULL, NULL, NULL, NULL,
1186 NULL, NULL, NULL, NULL,
1187 NULL, NULL, NULL, NULL,
1188 },
1189 .msr = {
1190 .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS,
1191 }
1192 },
1193
1194 [FEAT_VMX_EXIT_CTLS] = {
1195 .type = MSR_FEATURE_WORD,
1196 /*
1197 * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from
1198 * the LM CPUID bit.
1199 */
1200 .feat_names = {
1201 NULL, NULL, "vmx-exit-nosave-debugctl", NULL,
1202 NULL, NULL, NULL, NULL,
1203 NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL,
1204 "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr",
1205 NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat",
1206 "vmx-exit-save-efer", "vmx-exit-load-efer",
1207 "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs",
1208 NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL,
1209 NULL, NULL, NULL, NULL,
1210 },
1211 .msr = {
1212 .index = MSR_IA32_VMX_TRUE_EXIT_CTLS,
1213 }
1214 },
1215
1216 [FEAT_VMX_ENTRY_CTLS] = {
1217 .type = MSR_FEATURE_WORD,
1218 .feat_names = {
1219 NULL, NULL, "vmx-entry-noload-debugctl", NULL,
1220 NULL, NULL, NULL, NULL,
1221 NULL, "vmx-entry-ia32e-mode", NULL, NULL,
1222 NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer",
1223 "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL,
1224 NULL, NULL, NULL, NULL,
1225 NULL, NULL, NULL, NULL,
1226 NULL, NULL, NULL, NULL,
1227 },
1228 .msr = {
1229 .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS,
1230 }
1231 },
1232
1233 [FEAT_VMX_MISC] = {
1234 .type = MSR_FEATURE_WORD,
1235 .feat_names = {
1236 NULL, NULL, NULL, NULL,
1237 NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown",
1238 "vmx-activity-wait-sipi", NULL, NULL, NULL,
1239 NULL, NULL, NULL, NULL,
1240 NULL, NULL, NULL, NULL,
1241 NULL, NULL, NULL, NULL,
1242 NULL, NULL, NULL, NULL,
1243 NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL,
1244 },
1245 .msr = {
1246 .index = MSR_IA32_VMX_MISC,
1247 }
1248 },
1249
1250 [FEAT_VMX_EPT_VPID_CAPS] = {
1251 .type = MSR_FEATURE_WORD,
1252 .feat_names = {
1253 "vmx-ept-execonly", NULL, NULL, NULL,
1254 NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5",
1255 NULL, NULL, NULL, NULL,
1256 NULL, NULL, NULL, NULL,
1257 "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL,
1258 "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL,
1259 NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL,
1260 NULL, NULL, NULL, NULL,
1261 "vmx-invvpid", NULL, NULL, NULL,
1262 NULL, NULL, NULL, NULL,
1263 "vmx-invvpid-single-addr", "vmx-invept-single-context",
1264 "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals",
1265 NULL, NULL, NULL, NULL,
1266 NULL, NULL, NULL, NULL,
1267 NULL, NULL, NULL, NULL,
1268 NULL, NULL, NULL, NULL,
1269 NULL, NULL, NULL, NULL,
1270 },
1271 .msr = {
1272 .index = MSR_IA32_VMX_EPT_VPID_CAP,
1273 }
1274 },
1275
1276 [FEAT_VMX_BASIC] = {
1277 .type = MSR_FEATURE_WORD,
1278 .feat_names = {
1279 [54] = "vmx-ins-outs",
1280 [55] = "vmx-true-ctls",
1281 },
1282 .msr = {
1283 .index = MSR_IA32_VMX_BASIC,
1284 },
1285 /* Just to be safe - we don't support setting the MSEG version field. */
1286 .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR,
1287 },
1288
1289 [FEAT_VMX_VMFUNC] = {
1290 .type = MSR_FEATURE_WORD,
1291 .feat_names = {
1292 [0] = "vmx-eptp-switching",
1293 },
1294 .msr = {
1295 .index = MSR_IA32_VMX_VMFUNC,
1296 }
1297 },
1298
1299 };
1300
1301 typedef struct FeatureMask {
1302 FeatureWord index;
1303 uint64_t mask;
1304 } FeatureMask;
1305
1306 typedef struct FeatureDep {
1307 FeatureMask from, to;
1308 } FeatureDep;
1309
1310 static FeatureDep feature_dependencies[] = {
1311 {
1312 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES },
1313 .to = { FEAT_ARCH_CAPABILITIES, ~0ull },
1314 },
1315 {
1316 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY },
1317 .to = { FEAT_CORE_CAPABILITY, ~0ull },
1318 },
1319 {
1320 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1321 .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull },
1322 },
1323 {
1324 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1325 .to = { FEAT_VMX_PINBASED_CTLS, ~0ull },
1326 },
1327 {
1328 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1329 .to = { FEAT_VMX_EXIT_CTLS, ~0ull },
1330 },
1331 {
1332 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1333 .to = { FEAT_VMX_ENTRY_CTLS, ~0ull },
1334 },
1335 {
1336 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1337 .to = { FEAT_VMX_MISC, ~0ull },
1338 },
1339 {
1340 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1341 .to = { FEAT_VMX_BASIC, ~0ull },
1342 },
1343 {
1344 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM },
1345 .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE },
1346 },
1347 {
1348 .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS },
1349 .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull },
1350 },
1351 {
1352 .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES },
1353 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES },
1354 },
1355 {
1356 .from = { FEAT_1_ECX, CPUID_EXT_RDRAND },
1357 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING },
1358 },
1359 {
1360 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID },
1361 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID },
1362 },
1363 {
1364 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED },
1365 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING },
1366 },
1367 {
1368 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP },
1369 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP },
1370 },
1371 {
1372 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
1373 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull },
1374 },
1375 {
1376 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
1377 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST },
1378 },
1379 {
1380 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID },
1381 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 },
1382 },
1383 {
1384 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC },
1385 .to = { FEAT_VMX_VMFUNC, ~0ull },
1386 },
1387 };
1388
1389 typedef struct X86RegisterInfo32 {
1390 /* Name of register */
1391 const char *name;
1392 /* QAPI enum value for the register */
1393 X86CPURegister32 qapi_enum;
1394 } X86RegisterInfo32;
1395
1396 #define REGISTER(reg) \
1397 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1398 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
1399 REGISTER(EAX),
1400 REGISTER(ECX),
1401 REGISTER(EDX),
1402 REGISTER(EBX),
1403 REGISTER(ESP),
1404 REGISTER(EBP),
1405 REGISTER(ESI),
1406 REGISTER(EDI),
1407 };
1408 #undef REGISTER
1409
1410 typedef struct ExtSaveArea {
1411 uint32_t feature, bits;
1412 uint32_t offset, size;
1413 } ExtSaveArea;
1414
1415 static const ExtSaveArea x86_ext_save_areas[] = {
1416 [XSTATE_FP_BIT] = {
1417 /* x87 FP state component is always enabled if XSAVE is supported */
1418 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1419 /* x87 state is in the legacy region of the XSAVE area */
1420 .offset = 0,
1421 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1422 },
1423 [XSTATE_SSE_BIT] = {
1424 /* SSE state component is always enabled if XSAVE is supported */
1425 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1426 /* SSE state is in the legacy region of the XSAVE area */
1427 .offset = 0,
1428 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1429 },
1430 [XSTATE_YMM_BIT] =
1431 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1432 .offset = offsetof(X86XSaveArea, avx_state),
1433 .size = sizeof(XSaveAVX) },
1434 [XSTATE_BNDREGS_BIT] =
1435 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1436 .offset = offsetof(X86XSaveArea, bndreg_state),
1437 .size = sizeof(XSaveBNDREG) },
1438 [XSTATE_BNDCSR_BIT] =
1439 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1440 .offset = offsetof(X86XSaveArea, bndcsr_state),
1441 .size = sizeof(XSaveBNDCSR) },
1442 [XSTATE_OPMASK_BIT] =
1443 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1444 .offset = offsetof(X86XSaveArea, opmask_state),
1445 .size = sizeof(XSaveOpmask) },
1446 [XSTATE_ZMM_Hi256_BIT] =
1447 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1448 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1449 .size = sizeof(XSaveZMM_Hi256) },
1450 [XSTATE_Hi16_ZMM_BIT] =
1451 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1452 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1453 .size = sizeof(XSaveHi16_ZMM) },
1454 [XSTATE_PKRU_BIT] =
1455 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1456 .offset = offsetof(X86XSaveArea, pkru_state),
1457 .size = sizeof(XSavePKRU) },
1458 };
1459
1460 static uint32_t xsave_area_size(uint64_t mask)
1461 {
1462 int i;
1463 uint64_t ret = 0;
1464
1465 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1466 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1467 if ((mask >> i) & 1) {
1468 ret = MAX(ret, esa->offset + esa->size);
1469 }
1470 }
1471 return ret;
1472 }
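/*
 * Illustrative example (editorial note, not part of the upstream file):
 * a mask containing only the x87 and SSE components yields
 * sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), i.e. the 512-byte
 * legacy region plus the 64-byte XSAVE header; additionally enabling AVX
 * extends the result to cover avx_state.
 */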
1473
1474 static inline bool accel_uses_host_cpuid(void)
1475 {
1476 return kvm_enabled() || hvf_enabled();
1477 }
1478
1479 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1480 {
1481 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1482 cpu->env.features[FEAT_XSAVE_COMP_LO];
1483 }
1484
1485 const char *get_register_name_32(unsigned int reg)
1486 {
1487 if (reg >= CPU_NB_REGS32) {
1488 return NULL;
1489 }
1490 return x86_reg_info_32[reg].name;
1491 }
1492
1493 /*
1494 * Returns the set of feature flags that are supported and migratable by
1495 * QEMU, for a given FeatureWord.
1496 */
1497 static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
1498 {
1499 FeatureWordInfo *wi = &feature_word_info[w];
1500 uint64_t r = 0;
1501 int i;
1502
1503 for (i = 0; i < 64; i++) {
1504 uint64_t f = 1ULL << i;
1505
1506 /* If the feature name is known, it is implicitly considered migratable,
1507 * unless it is explicitly set in unmigratable_flags */
1508 if ((wi->migratable_flags & f) ||
1509 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1510 r |= f;
1511 }
1512 }
1513 return r;
1514 }
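/*
 * Illustrative example (editorial note, not part of the upstream file):
 * for FEAT_8000_0007_EDX, "invtsc" has a feature name but is listed in
 * unmigratable_flags, so it is excluded from the mask returned here.
 */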
1515
1516 void host_cpuid(uint32_t function, uint32_t count,
1517 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1518 {
1519 uint32_t vec[4];
1520
1521 #ifdef __x86_64__
1522 asm volatile("cpuid"
1523 : "=a"(vec[0]), "=b"(vec[1]),
1524 "=c"(vec[2]), "=d"(vec[3])
1525 : "0"(function), "c"(count) : "cc");
1526 #elif defined(__i386__)
1527 asm volatile("pusha \n\t"
1528 "cpuid \n\t"
1529 "mov %%eax, 0(%2) \n\t"
1530 "mov %%ebx, 4(%2) \n\t"
1531 "mov %%ecx, 8(%2) \n\t"
1532 "mov %%edx, 12(%2) \n\t"
1533 "popa"
1534 : : "a"(function), "c"(count), "S"(vec)
1535 : "memory", "cc");
1536 #else
1537 abort();
1538 #endif
1539
1540 if (eax)
1541 *eax = vec[0];
1542 if (ebx)
1543 *ebx = vec[1];
1544 if (ecx)
1545 *ecx = vec[2];
1546 if (edx)
1547 *edx = vec[3];
1548 }
1549
1550 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1551 {
1552 uint32_t eax, ebx, ecx, edx;
1553
1554 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1555 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1556
1557 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1558 if (family) {
1559 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1560 }
1561 if (model) {
1562 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1563 }
1564 if (stepping) {
1565 *stepping = eax & 0x0F;
1566 }
1567 }
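/*
 * Illustrative example (editorial note, not part of the upstream file):
 * a host reporting CPUID(1).EAX = 0x000506E3 decodes to family 6
 * (base 6 + extended 0), model 0x5E (base 0xE | extended 0x5 << 4) and
 * stepping 3.
 */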
1568
1569 /* CPU class name definitions: */
1570
1571 /* Return type name for a given CPU model name
1572 * Caller is responsible for freeing the returned string.
1573 */
1574 static char *x86_cpu_type_name(const char *model_name)
1575 {
1576 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
1577 }
1578
1579 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1580 {
1581 g_autofree char *typename = x86_cpu_type_name(cpu_model);
1582 return object_class_by_name(typename);
1583 }
1584
1585 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1586 {
1587 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1588 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1589 return g_strndup(class_name,
1590 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1591 }
1592
1593 typedef struct PropValue {
1594 const char *prop, *value;
1595 } PropValue;
1596
1597 typedef struct X86CPUVersionDefinition {
1598 X86CPUVersion version;
1599 const char *alias;
1600 const char *note;
1601 PropValue *props;
1602 } X86CPUVersionDefinition;
1603
1604 /* Base definition for a CPU model */
1605 typedef struct X86CPUDefinition {
1606 const char *name;
1607 uint32_t level;
1608 uint32_t xlevel;
1609 /* vendor is zero-terminated, 12 character ASCII string */
1610 char vendor[CPUID_VENDOR_SZ + 1];
1611 int family;
1612 int model;
1613 int stepping;
1614 FeatureWordArray features;
1615 const char *model_id;
1616 CPUCaches *cache_info;
1617
1618 /* Use AMD EPYC encoding for apic id */
1619 bool use_epyc_apic_id_encoding;
1620
1621 /*
1622 * Definitions for alternative versions of CPU model.
1623 * List is terminated by item with version == 0.
1624 * If NULL, version 1 will be registered automatically.
1625 */
1626 const X86CPUVersionDefinition *versions;
1627 } X86CPUDefinition;
1628
1629 /* Reference to a specific CPU model version */
1630 struct X86CPUModel {
1631 /* Base CPU definition */
1632 X86CPUDefinition *cpudef;
1633 /* CPU model version */
1634 X86CPUVersion version;
1635 const char *note;
1636 /*
1637 * If true, this is an alias CPU model.
1638 * This matters only for "-cpu help" and query-cpu-definitions
1639 */
1640 bool is_alias;
1641 };
1642
1643 /* Get full model name for CPU version */
1644 static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef,
1645 X86CPUVersion version)
1646 {
1647 assert(version > 0);
1648 return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
1649 }
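/*
 * Illustrative example (editorial note, not part of the upstream file):
 * for a definition named "EPYC" and version 2 this returns the newly
 * allocated string "EPYC-v2".
 */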
1650
1651 static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def)
1652 {
1653 /* When X86CPUDefinition::versions is NULL, we register only v1 */
1654 static const X86CPUVersionDefinition default_version_list[] = {
1655 { 1 },
1656 { /* end of list */ }
1657 };
1658
1659 return def->versions ?: default_version_list;
1660 }
1661
1662 bool cpu_x86_use_epyc_apic_id_encoding(const char *cpu_type)
1663 {
1664 X86CPUClass *xcc = X86_CPU_CLASS(object_class_by_name(cpu_type));
1665
1666 assert(xcc);
1667 if (xcc->model && xcc->model->cpudef) {
1668 return xcc->model->cpudef->use_epyc_apic_id_encoding;
1669 } else {
1670 return false;
1671 }
1672 }
1673
1674 static CPUCaches epyc_cache_info = {
1675 .l1d_cache = &(CPUCacheInfo) {
1676 .type = DATA_CACHE,
1677 .level = 1,
1678 .size = 32 * KiB,
1679 .line_size = 64,
1680 .associativity = 8,
1681 .partitions = 1,
1682 .sets = 64,
1683 .lines_per_tag = 1,
1684 .self_init = 1,
1685 .no_invd_sharing = true,
1686 },
1687 .l1i_cache = &(CPUCacheInfo) {
1688 .type = INSTRUCTION_CACHE,
1689 .level = 1,
1690 .size = 64 * KiB,
1691 .line_size = 64,
1692 .associativity = 4,
1693 .partitions = 1,
1694 .sets = 256,
1695 .lines_per_tag = 1,
1696 .self_init = 1,
1697 .no_invd_sharing = true,
1698 },
1699 .l2_cache = &(CPUCacheInfo) {
1700 .type = UNIFIED_CACHE,
1701 .level = 2,
1702 .size = 512 * KiB,
1703 .line_size = 64,
1704 .associativity = 8,
1705 .partitions = 1,
1706 .sets = 1024,
1707 .lines_per_tag = 1,
1708 },
1709 .l3_cache = &(CPUCacheInfo) {
1710 .type = UNIFIED_CACHE,
1711 .level = 3,
1712 .size = 8 * MiB,
1713 .line_size = 64,
1714 .associativity = 16,
1715 .partitions = 1,
1716 .sets = 8192,
1717 .lines_per_tag = 1,
1718 .self_init = true,
1719 .inclusive = true,
1720 .complex_indexing = true,
1721 },
1722 };
1723
1724 static CPUCaches epyc_rome_cache_info = {
1725 .l1d_cache = &(CPUCacheInfo) {
1726 .type = DATA_CACHE,
1727 .level = 1,
1728 .size = 32 * KiB,
1729 .line_size = 64,
1730 .associativity = 8,
1731 .partitions = 1,
1732 .sets = 64,
1733 .lines_per_tag = 1,
1734 .self_init = 1,
1735 .no_invd_sharing = true,
1736 },
1737 .l1i_cache = &(CPUCacheInfo) {
1738 .type = INSTRUCTION_CACHE,
1739 .level = 1,
1740 .size = 32 * KiB,
1741 .line_size = 64,
1742 .associativity = 8,
1743 .partitions = 1,
1744 .sets = 64,
1745 .lines_per_tag = 1,
1746 .self_init = 1,
1747 .no_invd_sharing = true,
1748 },
1749 .l2_cache = &(CPUCacheInfo) {
1750 .type = UNIFIED_CACHE,
1751 .level = 2,
1752 .size = 512 * KiB,
1753 .line_size = 64,
1754 .associativity = 8,
1755 .partitions = 1,
1756 .sets = 1024,
1757 .lines_per_tag = 1,
1758 },
1759 .l3_cache = &(CPUCacheInfo) {
1760 .type = UNIFIED_CACHE,
1761 .level = 3,
1762 .size = 16 * MiB,
1763 .line_size = 64,
1764 .associativity = 16,
1765 .partitions = 1,
1766 .sets = 16384,
1767 .lines_per_tag = 1,
1768 .self_init = true,
1769 .inclusive = true,
1770 .complex_indexing = true,
1771 },
1772 };
1773
1774 /* The following VMX features are not supported by KVM and are left out of the
1775 * CPU definitions:
1776 *
1777 * Dual-monitor support (all processors)
1778 * Entry to SMM
1779 * Deactivate dual-monitor treatment
1780 * Number of CR3-target values
1781 * Shutdown activity state
1782 * Wait-for-SIPI activity state
1783 * PAUSE-loop exiting (Westmere and newer)
1784 * EPT-violation #VE (Broadwell and newer)
1785 * Inject event with insn length=0 (Skylake and newer)
1786 * Conceal non-root operation from PT
1787 * Conceal VM exits from PT
1788 * Conceal VM entries from PT
1789 * Enable ENCLS exiting
1790 * Mode-based execute control (XS/XU)
1791 * TSC scaling (Skylake Server and newer)
1792 * GPA translation for PT (IceLake and newer)
1793 * User wait and pause
1794 * ENCLV exiting
1795 * Load IA32_RTIT_CTL
1796 * Clear IA32_RTIT_CTL
1797 * Advanced VM-exit information for EPT violations
1798 * Sub-page write permissions
1799 * PT in VMX operation
1800 */
1801
1802 static X86CPUDefinition builtin_x86_defs[] = {
1803 {
1804 .name = "qemu64",
1805 .level = 0xd,
1806 .vendor = CPUID_VENDOR_AMD,
1807 .family = 6,
1808 .model = 6,
1809 .stepping = 3,
1810 .features[FEAT_1_EDX] =
1811 PPRO_FEATURES |
1812 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1813 CPUID_PSE36,
1814 .features[FEAT_1_ECX] =
1815 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1816 .features[FEAT_8000_0001_EDX] =
1817 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1818 .features[FEAT_8000_0001_ECX] =
1819 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1820 .xlevel = 0x8000000A,
1821 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1822 },
1823 {
1824 .name = "phenom",
1825 .level = 5,
1826 .vendor = CPUID_VENDOR_AMD,
1827 .family = 16,
1828 .model = 2,
1829 .stepping = 3,
1830 /* Missing: CPUID_HT */
1831 .features[FEAT_1_EDX] =
1832 PPRO_FEATURES |
1833 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1834 CPUID_PSE36 | CPUID_VME,
1835 .features[FEAT_1_ECX] =
1836 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1837 CPUID_EXT_POPCNT,
1838 .features[FEAT_8000_0001_EDX] =
1839 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1840 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1841 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1842 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1843 CPUID_EXT3_CR8LEG,
1844 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1845 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1846 .features[FEAT_8000_0001_ECX] =
1847 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1848 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1849 /* Missing: CPUID_SVM_LBRV */
1850 .features[FEAT_SVM] =
1851 CPUID_SVM_NPT,
1852 .xlevel = 0x8000001A,
1853 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1854 },
1855 {
1856 .name = "core2duo",
1857 .level = 10,
1858 .vendor = CPUID_VENDOR_INTEL,
1859 .family = 6,
1860 .model = 15,
1861 .stepping = 11,
1862 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1863 .features[FEAT_1_EDX] =
1864 PPRO_FEATURES |
1865 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1866 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1867 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1868 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1869 .features[FEAT_1_ECX] =
1870 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1871 CPUID_EXT_CX16,
1872 .features[FEAT_8000_0001_EDX] =
1873 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1874 .features[FEAT_8000_0001_ECX] =
1875 CPUID_EXT3_LAHF_LM,
1876 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
1877 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1878 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1879 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1880 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1881 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
1882 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1883 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1884 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
1885 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
1886 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
1887 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
1888 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
1889 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
1890 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
1891 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
1892 .features[FEAT_VMX_SECONDARY_CTLS] =
1893 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
1894 .xlevel = 0x80000008,
1895 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1896 },
1897 {
1898 .name = "kvm64",
1899 .level = 0xd,
1900 .vendor = CPUID_VENDOR_INTEL,
1901 .family = 15,
1902 .model = 6,
1903 .stepping = 1,
1904 /* Missing: CPUID_HT */
1905 .features[FEAT_1_EDX] =
1906 PPRO_FEATURES | CPUID_VME |
1907 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1908 CPUID_PSE36,
1909 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1910 .features[FEAT_1_ECX] =
1911 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1912 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1913 .features[FEAT_8000_0001_EDX] =
1914 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1915 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1916 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1917 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1918 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1919 .features[FEAT_8000_0001_ECX] =
1920 0,
1921 /* VMX features from Cedar Mill/Prescott */
1922 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1923 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1924 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1925 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1926 VMX_PIN_BASED_NMI_EXITING,
1927 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1928 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1929 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
1930 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
1931 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
1932 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
1933 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
1934 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING,
1935 .xlevel = 0x80000008,
1936 .model_id = "Common KVM processor"
1937 },
1938 {
1939 .name = "qemu32",
1940 .level = 4,
1941 .vendor = CPUID_VENDOR_INTEL,
1942 .family = 6,
1943 .model = 6,
1944 .stepping = 3,
1945 .features[FEAT_1_EDX] =
1946 PPRO_FEATURES,
1947 .features[FEAT_1_ECX] =
1948 CPUID_EXT_SSE3,
1949 .xlevel = 0x80000004,
1950 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1951 },
1952 {
1953 .name = "kvm32",
1954 .level = 5,
1955 .vendor = CPUID_VENDOR_INTEL,
1956 .family = 15,
1957 .model = 6,
1958 .stepping = 1,
1959 .features[FEAT_1_EDX] =
1960 PPRO_FEATURES | CPUID_VME |
1961 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1962 .features[FEAT_1_ECX] =
1963 CPUID_EXT_SSE3,
1964 .features[FEAT_8000_0001_ECX] =
1965 0,
1966 /* VMX features from Yonah */
1967 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1968 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1969 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1970 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1971 VMX_PIN_BASED_NMI_EXITING,
1972 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1973 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1974 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
1975 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
1976 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
1977 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
1978 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
1979 .xlevel = 0x80000008,
1980 .model_id = "Common 32-bit KVM processor"
1981 },
1982 {
1983 .name = "coreduo",
1984 .level = 10,
1985 .vendor = CPUID_VENDOR_INTEL,
1986 .family = 6,
1987 .model = 14,
1988 .stepping = 8,
1989 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1990 .features[FEAT_1_EDX] =
1991 PPRO_FEATURES | CPUID_VME |
1992 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1993 CPUID_SS,
1994 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
1995 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1996 .features[FEAT_1_ECX] =
1997 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1998 .features[FEAT_8000_0001_EDX] =
1999 CPUID_EXT2_NX,
2000 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
2001 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
2002 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2003 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2004 VMX_PIN_BASED_NMI_EXITING,
2005 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2006 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2007 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2008 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2009 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
2010 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
2011 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
2012 .xlevel = 0x80000008,
2013 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
2014 },
2015 {
2016 .name = "486",
2017 .level = 1,
2018 .vendor = CPUID_VENDOR_INTEL,
2019 .family = 4,
2020 .model = 8,
2021 .stepping = 0,
2022 .features[FEAT_1_EDX] =
2023 I486_FEATURES,
2024 .xlevel = 0,
2025 .model_id = "",
2026 },
2027 {
2028 .name = "pentium",
2029 .level = 1,
2030 .vendor = CPUID_VENDOR_INTEL,
2031 .family = 5,
2032 .model = 4,
2033 .stepping = 3,
2034 .features[FEAT_1_EDX] =
2035 PENTIUM_FEATURES,
2036 .xlevel = 0,
2037 .model_id = "",
2038 },
2039 {
2040 .name = "pentium2",
2041 .level = 2,
2042 .vendor = CPUID_VENDOR_INTEL,
2043 .family = 6,
2044 .model = 5,
2045 .stepping = 2,
2046 .features[FEAT_1_EDX] =
2047 PENTIUM2_FEATURES,
2048 .xlevel = 0,
2049 .model_id = "",
2050 },
2051 {
2052 .name = "pentium3",
2053 .level = 3,
2054 .vendor = CPUID_VENDOR_INTEL,
2055 .family = 6,
2056 .model = 7,
2057 .stepping = 3,
2058 .features[FEAT_1_EDX] =
2059 PENTIUM3_FEATURES,
2060 .xlevel = 0,
2061 .model_id = "",
2062 },
2063 {
2064 .name = "athlon",
2065 .level = 2,
2066 .vendor = CPUID_VENDOR_AMD,
2067 .family = 6,
2068 .model = 2,
2069 .stepping = 3,
2070 .features[FEAT_1_EDX] =
2071 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
2072 CPUID_MCA,
2073 .features[FEAT_8000_0001_EDX] =
2074 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
2075 .xlevel = 0x80000008,
2076 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
2077 },
2078 {
2079 .name = "n270",
2080 .level = 10,
2081 .vendor = CPUID_VENDOR_INTEL,
2082 .family = 6,
2083 .model = 28,
2084 .stepping = 2,
2085 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2086 .features[FEAT_1_EDX] =
2087 PPRO_FEATURES |
2088 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
2089 CPUID_ACPI | CPUID_SS,
2090 /* Some CPUs lack CPUID_SEP */
2091 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
2092 * CPUID_EXT_XTPR */
2093 .features[FEAT_1_ECX] =
2094 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
2095 CPUID_EXT_MOVBE,
2096 .features[FEAT_8000_0001_EDX] =
2097 CPUID_EXT2_NX,
2098 .features[FEAT_8000_0001_ECX] =
2099 CPUID_EXT3_LAHF_LM,
2100 .xlevel = 0x80000008,
2101 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
2102 },
2103 {
2104 .name = "Conroe",
2105 .level = 10,
2106 .vendor = CPUID_VENDOR_INTEL,
2107 .family = 6,
2108 .model = 15,
2109 .stepping = 3,
2110 .features[FEAT_1_EDX] =
2111 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2112 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2113 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2114 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2115 CPUID_DE | CPUID_FP87,
2116 .features[FEAT_1_ECX] =
2117 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2118 .features[FEAT_8000_0001_EDX] =
2119 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2120 .features[FEAT_8000_0001_ECX] =
2121 CPUID_EXT3_LAHF_LM,
2122 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
2123 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
2124 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
2125 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2126 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2127 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
2128 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2129 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2130 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2131 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2132 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2133 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2134 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2135 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2136 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2137 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2138 .features[FEAT_VMX_SECONDARY_CTLS] =
2139 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
2140 .xlevel = 0x80000008,
2141 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
2142 },
2143 {
2144 .name = "Penryn",
2145 .level = 10,
2146 .vendor = CPUID_VENDOR_INTEL,
2147 .family = 6,
2148 .model = 23,
2149 .stepping = 3,
2150 .features[FEAT_1_EDX] =
2151 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2152 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2153 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2154 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2155 CPUID_DE | CPUID_FP87,
2156 .features[FEAT_1_ECX] =
2157 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2158 CPUID_EXT_SSE3,
2159 .features[FEAT_8000_0001_EDX] =
2160 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2161 .features[FEAT_8000_0001_ECX] =
2162 CPUID_EXT3_LAHF_LM,
2163 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
2164 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2165 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2166 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT |
2167 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
2168 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2169 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2170 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
2171 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2172 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2173 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2174 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2175 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2176 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2177 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2178 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2179 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2180 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2181 .features[FEAT_VMX_SECONDARY_CTLS] =
2182 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2183 VMX_SECONDARY_EXEC_WBINVD_EXITING,
2184 .xlevel = 0x80000008,
2185 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
2186 },
2187 {
2188 .name = "Nehalem",
2189 .level = 11,
2190 .vendor = CPUID_VENDOR_INTEL,
2191 .family = 6,
2192 .model = 26,
2193 .stepping = 3,
2194 .features[FEAT_1_EDX] =
2195 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2196 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2197 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2198 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2199 CPUID_DE | CPUID_FP87,
2200 .features[FEAT_1_ECX] =
2201 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2202 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2203 .features[FEAT_8000_0001_EDX] =
2204 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2205 .features[FEAT_8000_0001_ECX] =
2206 CPUID_EXT3_LAHF_LM,
2207 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2208 MSR_VMX_BASIC_TRUE_CTLS,
2209 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2210 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2211 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2212 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2213 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2214 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2215 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2216 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2217 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2218 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2219 .features[FEAT_VMX_EXIT_CTLS] =
2220 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2221 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2222 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2223 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2224 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2225 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2226 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2227 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2228 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2229 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2230 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2231 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2232 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2233 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2234 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2235 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2236 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2237 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2238 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2239 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2240 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2241 .features[FEAT_VMX_SECONDARY_CTLS] =
2242 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2243 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2244 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2245 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2246 VMX_SECONDARY_EXEC_ENABLE_VPID,
2247 .xlevel = 0x80000008,
2248 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
2249 .versions = (X86CPUVersionDefinition[]) {
2250 { .version = 1 },
2251 {
2252 .version = 2,
2253 .alias = "Nehalem-IBRS",
2254 .props = (PropValue[]) {
2255 { "spec-ctrl", "on" },
2256 { "model-id",
2257 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" },
2258 { /* end of list */ }
2259 }
2260 },
2261 { /* end of list */ }
2262 }
2263 },
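/*
 * Editor's note: with the version list above, "-cpu Nehalem-v1" and
 * "-cpu Nehalem-v2" select a version explicitly, "-cpu Nehalem-IBRS" is the
 * back-compat alias for v2, and plain "-cpu Nehalem" resolves to whichever
 * version the machine type picks as its default.  The is_alias flag in
 * X86CPUModel only affects how these names are reported by "-cpu help" and
 * query-cpu-definitions.
 */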
2264 {
2265 .name = "Westmere",
2266 .level = 11,
2267 .vendor = CPUID_VENDOR_INTEL,
2268 .family = 6,
2269 .model = 44,
2270 .stepping = 1,
2271 .features[FEAT_1_EDX] =
2272 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2273 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2274 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2275 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2276 CPUID_DE | CPUID_FP87,
2277 .features[FEAT_1_ECX] =
2278 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2279 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2280 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2281 .features[FEAT_8000_0001_EDX] =
2282 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2283 .features[FEAT_8000_0001_ECX] =
2284 CPUID_EXT3_LAHF_LM,
2285 .features[FEAT_6_EAX] =
2286 CPUID_6_EAX_ARAT,
2287 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2288 MSR_VMX_BASIC_TRUE_CTLS,
2289 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2290 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2291 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2292 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2293 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2294 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2295 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2296 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2297 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2298 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2299 .features[FEAT_VMX_EXIT_CTLS] =
2300 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2301 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2302 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2303 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2304 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2305 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2306 MSR_VMX_MISC_STORE_LMA,
2307 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2308 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2309 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2310 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2311 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2312 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2313 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2314 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2315 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2316 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2317 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2318 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2319 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2320 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2321 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2322 .features[FEAT_VMX_SECONDARY_CTLS] =
2323 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2324 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2325 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2326 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2327 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
2328 .xlevel = 0x80000008,
2329 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
2330 .versions = (X86CPUVersionDefinition[]) {
2331 { .version = 1 },
2332 {
2333 .version = 2,
2334 .alias = "Westmere-IBRS",
2335 .props = (PropValue[]) {
2336 { "spec-ctrl", "on" },
2337 { "model-id",
2338 "Westmere E56xx/L56xx/X56xx (IBRS update)" },
2339 { /* end of list */ }
2340 }
2341 },
2342 { /* end of list */ }
2343 }
2344 },
2345 {
2346 .name = "SandyBridge",
2347 .level = 0xd,
2348 .vendor = CPUID_VENDOR_INTEL,
2349 .family = 6,
2350 .model = 42,
2351 .stepping = 1,
2352 .features[FEAT_1_EDX] =
2353 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2354 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2355 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2356 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2357 CPUID_DE | CPUID_FP87,
2358 .features[FEAT_1_ECX] =
2359 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2360 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2361 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2362 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2363 CPUID_EXT_SSE3,
2364 .features[FEAT_8000_0001_EDX] =
2365 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2366 CPUID_EXT2_SYSCALL,
2367 .features[FEAT_8000_0001_ECX] =
2368 CPUID_EXT3_LAHF_LM,
2369 .features[FEAT_XSAVE] =
2370 CPUID_XSAVE_XSAVEOPT,
2371 .features[FEAT_6_EAX] =
2372 CPUID_6_EAX_ARAT,
2373 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2374 MSR_VMX_BASIC_TRUE_CTLS,
2375 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2376 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2377 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2378 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2379 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2380 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2381 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2382 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2383 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2384 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2385 .features[FEAT_VMX_EXIT_CTLS] =
2386 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2387 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2388 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2389 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2390 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2391 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2392 MSR_VMX_MISC_STORE_LMA,
2393 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2394 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2395 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2396 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2397 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2398 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2399 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2400 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2401 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2402 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2403 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2404 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2405 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2406 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2407 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2408 .features[FEAT_VMX_SECONDARY_CTLS] =
2409 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2410 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2411 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2412 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2413 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
2414 .xlevel = 0x80000008,
2415 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
2416 .versions = (X86CPUVersionDefinition[]) {
2417 { .version = 1 },
2418 {
2419 .version = 2,
2420 .alias = "SandyBridge-IBRS",
2421 .props = (PropValue[]) {
2422 { "spec-ctrl", "on" },
2423 { "model-id",
2424 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" },
2425 { /* end of list */ }
2426 }
2427 },
2428 { /* end of list */ }
2429 }
2430 },
2431 {
2432 .name = "IvyBridge",
2433 .level = 0xd,
2434 .vendor = CPUID_VENDOR_INTEL,
2435 .family = 6,
2436 .model = 58,
2437 .stepping = 9,
2438 .features[FEAT_1_EDX] =
2439 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2440 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2441 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2442 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2443 CPUID_DE | CPUID_FP87,
2444 .features[FEAT_1_ECX] =
2445 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2446 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2447 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2448 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2449 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2450 .features[FEAT_7_0_EBX] =
2451 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
2452 CPUID_7_0_EBX_ERMS,
2453 .features[FEAT_8000_0001_EDX] =
2454 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2455 CPUID_EXT2_SYSCALL,
2456 .features[FEAT_8000_0001_ECX] =
2457 CPUID_EXT3_LAHF_LM,
2458 .features[FEAT_XSAVE] =
2459 CPUID_XSAVE_XSAVEOPT,
2460 .features[FEAT_6_EAX] =
2461 CPUID_6_EAX_ARAT,
2462 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2463 MSR_VMX_BASIC_TRUE_CTLS,
2464 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2465 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2466 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2467 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2468 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2469 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2470 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2471 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2472 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2473 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2474 .features[FEAT_VMX_EXIT_CTLS] =
2475 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2476 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2477 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2478 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2479 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2480 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2481 MSR_VMX_MISC_STORE_LMA,
2482 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2483 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2484 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2485 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2486 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2487 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2488 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2489 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2490 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2491 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2492 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2493 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2494 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2495 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2496 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2497 .features[FEAT_VMX_SECONDARY_CTLS] =
2498 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2499 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2500 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2501 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2502 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2503 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2504 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2505 VMX_SECONDARY_EXEC_RDRAND_EXITING,
2506 .xlevel = 0x80000008,
2507 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
2508 .versions = (X86CPUVersionDefinition[]) {
2509 { .version = 1 },
2510 {
2511 .version = 2,
2512 .alias = "IvyBridge-IBRS",
2513 .props = (PropValue[]) {
2514 { "spec-ctrl", "on" },
2515 { "model-id",
2516 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" },
2517 { /* end of list */ }
2518 }
2519 },
2520 { /* end of list */ }
2521 }
2522 },
2523 {
2524 .name = "Haswell",
2525 .level = 0xd,
2526 .vendor = CPUID_VENDOR_INTEL,
2527 .family = 6,
2528 .model = 60,
2529 .stepping = 4,
2530 .features[FEAT_1_EDX] =
2531 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2532 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2533 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2534 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2535 CPUID_DE | CPUID_FP87,
2536 .features[FEAT_1_ECX] =
2537 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2538 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2539 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2540 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2541 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2542 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2543 .features[FEAT_8000_0001_EDX] =
2544 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2545 CPUID_EXT2_SYSCALL,
2546 .features[FEAT_8000_0001_ECX] =
2547 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2548 .features[FEAT_7_0_EBX] =
2549 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2550 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2551 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2552 CPUID_7_0_EBX_RTM,
2553 .features[FEAT_XSAVE] =
2554 CPUID_XSAVE_XSAVEOPT,
2555 .features[FEAT_6_EAX] =
2556 CPUID_6_EAX_ARAT,
2557 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2558 MSR_VMX_BASIC_TRUE_CTLS,
2559 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2560 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2561 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2562 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2563 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2564 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2565 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2566 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2567 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2568 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2569 .features[FEAT_VMX_EXIT_CTLS] =
2570 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2571 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2572 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2573 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2574 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2575 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2576 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2577 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2578 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2579 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2580 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2581 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2582 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2583 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2584 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2585 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2586 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2587 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2588 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2589 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2590 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2591 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2592 .features[FEAT_VMX_SECONDARY_CTLS] =
2593 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2594 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2595 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2596 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2597 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2598 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2599 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2600 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2601 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
2602 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2603 .xlevel = 0x80000008,
2604 .model_id = "Intel Core Processor (Haswell)",
2605 .versions = (X86CPUVersionDefinition[]) {
2606 { .version = 1 },
2607 {
2608 .version = 2,
2609 .alias = "Haswell-noTSX",
2610 .props = (PropValue[]) {
2611 { "hle", "off" },
2612 { "rtm", "off" },
2613 { "stepping", "1" },
2614 { "model-id", "Intel Core Processor (Haswell, no TSX)", },
2615 { /* end of list */ }
2616 },
2617 },
2618 {
2619 .version = 3,
2620 .alias = "Haswell-IBRS",
2621 .props = (PropValue[]) {
2622 /* Restore TSX features removed by -v2 above */
2623 { "hle", "on" },
2624 { "rtm", "on" },
2625 /*
2626 * Haswell and Haswell-IBRS had stepping=4 in
2627 * QEMU 4.0 and older
2628 */
2629 { "stepping", "4" },
2630 { "spec-ctrl", "on" },
2631 { "model-id",
2632 "Intel Core Processor (Haswell, IBRS)" },
2633 { /* end of list */ }
2634 }
2635 },
2636 {
2637 .version = 4,
2638 .alias = "Haswell-noTSX-IBRS",
2639 .props = (PropValue[]) {
2640 { "hle", "off" },
2641 { "rtm", "off" },
2642 /* spec-ctrl was already enabled by -v3 above */
2643 { "stepping", "1" },
2644 { "model-id",
2645 "Intel Core Processor (Haswell, no TSX, IBRS)" },
2646 { /* end of list */ }
2647 }
2648 },
2649 { /* end of list */ }
2650 }
2651 },
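/*
 * Editor's note: as the in-line comments above imply ("Restore TSX features
 * removed by -v2 above", "spec-ctrl was already enabled by -v3 above"),
 * version properties apply cumulatively: selecting Haswell-v4 applies the v2
 * props, then v3, then v4.  A rough sketch of that order, with the actual
 * property-setting call elided (illustration only; "model" and "cpu" are
 * assumed locals):
 */
#if 0
    const X86CPUVersionDefinition *vdef;
    for (vdef = x86_cpu_def_get_versions(model->cpudef);
         vdef->version && vdef->version <= model->version; vdef++) {
        PropValue *pv;
        for (pv = vdef->props; pv && pv->prop; pv++) {
            /* set property pv->prop to pv->value on the CPU object */
        }
    }
#endif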
2652 {
2653 .name = "Broadwell",
2654 .level = 0xd,
2655 .vendor = CPUID_VENDOR_INTEL,
2656 .family = 6,
2657 .model = 61,
2658 .stepping = 2,
2659 .features[FEAT_1_EDX] =
2660 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2661 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2662 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2663 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2664 CPUID_DE | CPUID_FP87,
2665 .features[FEAT_1_ECX] =
2666 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2667 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2668 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2669 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2670 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2671 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2672 .features[FEAT_8000_0001_EDX] =
2673 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2674 CPUID_EXT2_SYSCALL,
2675 .features[FEAT_8000_0001_ECX] =
2676 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2677 .features[FEAT_7_0_EBX] =
2678 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2679 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2680 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2681 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2682 CPUID_7_0_EBX_SMAP,
2683 .features[FEAT_XSAVE] =
2684 CPUID_XSAVE_XSAVEOPT,
2685 .features[FEAT_6_EAX] =
2686 CPUID_6_EAX_ARAT,
2687 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2688 MSR_VMX_BASIC_TRUE_CTLS,
2689 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2690 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2691 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2692 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2693 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2694 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2695 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2696 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2697 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2698 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2699 .features[FEAT_VMX_EXIT_CTLS] =
2700 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2701 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2702 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2703 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2704 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2705 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2706 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2707 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2708 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2709 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2710 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2711 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2712 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2713 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2714 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2715 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2716 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2717 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2718 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2719 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2720 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2721 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2722 .features[FEAT_VMX_SECONDARY_CTLS] =
2723 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2724 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2725 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2726 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2727 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2728 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2729 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2730 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2731 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
2732 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
2733 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2734 .xlevel = 0x80000008,
2735 .model_id = "Intel Core Processor (Broadwell)",
2736 .versions = (X86CPUVersionDefinition[]) {
2737 { .version = 1 },
2738 {
2739 .version = 2,
2740 .alias = "Broadwell-noTSX",
2741 .props = (PropValue[]) {
2742 { "hle", "off" },
2743 { "rtm", "off" },
2744 { "model-id", "Intel Core Processor (Broadwell, no TSX)", },
2745 { /* end of list */ }
2746 },
2747 },
2748 {
2749 .version = 3,
2750 .alias = "Broadwell-IBRS",
2751 .props = (PropValue[]) {
2752 /* Restore TSX features removed by -v2 above */
2753 { "hle", "on" },
2754 { "rtm", "on" },
2755 { "spec-ctrl", "on" },
2756 { "model-id",
2757 "Intel Core Processor (Broadwell, IBRS)" },
2758 { /* end of list */ }
2759 }
2760 },
2761 {
2762 .version = 4,
2763 .alias = "Broadwell-noTSX-IBRS",
2764 .props = (PropValue[]) {
2765 { "hle", "off" },
2766 { "rtm", "off" },
2767 /* spec-ctrl was already enabled by -v3 above */
2768 { "model-id",
2769 "Intel Core Processor (Broadwell, no TSX, IBRS)" },
2770 { /* end of list */ }
2771 }
2772 },
2773 { /* end of list */ }
2774 }
2775 },
2776 {
2777 .name = "Skylake-Client",
2778 .level = 0xd,
2779 .vendor = CPUID_VENDOR_INTEL,
2780 .family = 6,
2781 .model = 94,
2782 .stepping = 3,
2783 .features[FEAT_1_EDX] =
2784 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2785 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2786 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2787 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2788 CPUID_DE | CPUID_FP87,
2789 .features[FEAT_1_ECX] =
2790 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2791 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2792 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2793 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2794 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2795 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2796 .features[FEAT_8000_0001_EDX] =
2797 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2798 CPUID_EXT2_SYSCALL,
2799 .features[FEAT_8000_0001_ECX] =
2800 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2801 .features[FEAT_7_0_EBX] =
2802 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2803 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2804 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2805 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2806 CPUID_7_0_EBX_SMAP,
2807 /* Missing: XSAVES (not supported by some Linux versions,
2808 * including v4.1 to v4.12).
2809 * KVM doesn't yet expose any XSAVES state save component,
2810 * and the only one defined in Skylake (processor tracing)
2811 * probably will block migration anyway.
2812 */
2813 .features[FEAT_XSAVE] =
2814 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2815 CPUID_XSAVE_XGETBV1,
2816 .features[FEAT_6_EAX] =
2817 CPUID_6_EAX_ARAT,
2818 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
2819 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2820 MSR_VMX_BASIC_TRUE_CTLS,
2821 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2822 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2823 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2824 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2825 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2826 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2827 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2828 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2829 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2830 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2831 .features[FEAT_VMX_EXIT_CTLS] =
2832 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2833 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2834 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2835 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2836 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2837 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2838 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2839 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2840 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2841 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2842 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2843 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2844 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2845 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2846 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2847 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2848 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2849 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2850 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2851 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2852 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2853 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2854 .features[FEAT_VMX_SECONDARY_CTLS] =
2855 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2856 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2857 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2858 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2859 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2860 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
2861 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
2862 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2863 .xlevel = 0x80000008,
2864 .model_id = "Intel Core Processor (Skylake)",
2865 .versions = (X86CPUVersionDefinition[]) {
2866 { .version = 1 },
2867 {
2868 .version = 2,
2869 .alias = "Skylake-Client-IBRS",
2870 .props = (PropValue[]) {
2871 { "spec-ctrl", "on" },
2872 { "model-id",
2873 "Intel Core Processor (Skylake, IBRS)" },
2874 { /* end of list */ }
2875 }
2876 },
2877 {
2878 .version = 3,
2879 .alias = "Skylake-Client-noTSX-IBRS",
2880 .props = (PropValue[]) {
2881 { "hle", "off" },
2882 { "rtm", "off" },
2883 { "model-id",
2884 "Intel Core Processor (Skylake, IBRS, no TSX)" },
2885 { /* end of list */ }
2886 }
2887 },
2888 { /* end of list */ }
2889 }
2890 },
2891 {
2892 .name = "Skylake-Server",
2893 .level = 0xd,
2894 .vendor = CPUID_VENDOR_INTEL,
2895 .family = 6,
2896 .model = 85,
2897 .stepping = 4,
2898 .features[FEAT_1_EDX] =
2899 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2900 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2901 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2902 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2903 CPUID_DE | CPUID_FP87,
2904 .features[FEAT_1_ECX] =
2905 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2906 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2907 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2908 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2909 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2910 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2911 .features[FEAT_8000_0001_EDX] =
2912 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2913 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2914 .features[FEAT_8000_0001_ECX] =
2915 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2916 .features[FEAT_7_0_EBX] =
2917 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2918 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2919 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2920 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2921 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2922 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2923 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2924 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2925 .features[FEAT_7_0_ECX] =
2926 CPUID_7_0_ECX_PKU,
2927 /* Missing: XSAVES (not supported by some Linux versions,
2928 * including v4.1 to v4.12).
2929 * KVM doesn't yet expose any XSAVES state save component,
2930 * and the only one defined in Skylake (processor tracing)
2931 * probably will block migration anyway.
2932 */
2933 .features[FEAT_XSAVE] =
2934 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2935 CPUID_XSAVE_XGETBV1,
2936 .features[FEAT_6_EAX] =
2937 CPUID_6_EAX_ARAT,
2938 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
2939 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2940 MSR_VMX_BASIC_TRUE_CTLS,
2941 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2942 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2943 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2944 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2945 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2946 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2947 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2948 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2949 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2950 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2951 .features[FEAT_VMX_EXIT_CTLS] =
2952 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2953 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2954 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2955 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2956 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2957 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2958 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2959 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2960 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2961 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2962 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2963 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2964 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2965 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2966 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2967 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2968 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2969 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2970 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2971 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2972 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2973 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2974 .features[FEAT_VMX_SECONDARY_CTLS] =
2975 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2976 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2977 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2978 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2979 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2980 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2981 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2982 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2983 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
2984 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
2985 .xlevel = 0x80000008,
2986 .model_id = "Intel Xeon Processor (Skylake)",
2987 .versions = (X86CPUVersionDefinition[]) {
2988 { .version = 1 },
2989 {
2990 .version = 2,
2991 .alias = "Skylake-Server-IBRS",
2992 .props = (PropValue[]) {
2993 /* clflushopt was not added to Skylake-Server-IBRS */
2994 /* TODO: add -v3 including clflushopt */
2995 { "clflushopt", "off" },
2996 { "spec-ctrl", "on" },
2997 { "model-id",
2998 "Intel Xeon Processor (Skylake, IBRS)" },
2999 { /* end of list */ }
3000 }
3001 },
3002 {
3003 .version = 3,
3004 .alias = "Skylake-Server-noTSX-IBRS",
3005 .props = (PropValue[]) {
3006 { "hle", "off" },
3007 { "rtm", "off" },
3008 { "model-id",
3009 "Intel Xeon Processor (Skylake, IBRS, no TSX)" },
3010 { /* end of list */ }
3011 }
3012 },
3013 { /* end of list */ }
3014 }
3015 },
3016 {
3017 .name = "Cascadelake-Server",
3018 .level = 0xd,
3019 .vendor = CPUID_VENDOR_INTEL,
3020 .family = 6,
3021 .model = 85,
3022 .stepping = 6,
3023 .features[FEAT_1_EDX] =
3024 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3025 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3026 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3027 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3028 CPUID_DE | CPUID_FP87,
3029 .features[FEAT_1_ECX] =
3030 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3031 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3032 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3033 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3034 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3035 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3036 .features[FEAT_8000_0001_EDX] =
3037 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3038 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3039 .features[FEAT_8000_0001_ECX] =
3040 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3041 .features[FEAT_7_0_EBX] =
3042 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3043 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3044 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3045 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3046 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3047 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3048 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3049 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3050 .features[FEAT_7_0_ECX] =
3051 CPUID_7_0_ECX_PKU |
3052 CPUID_7_0_ECX_AVX512VNNI,
3053 .features[FEAT_7_0_EDX] =
3054 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3055 /* Missing: XSAVES (not supported by some Linux versions,
3056 * including v4.1 to v4.12).
3057 * KVM doesn't yet expose any XSAVES state save component,
3058 * and the only one defined in Skylake (processor tracing)
3059 * probably will block migration anyway.
3060 */
3061 .features[FEAT_XSAVE] =
3062 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3063 CPUID_XSAVE_XGETBV1,
3064 .features[FEAT_6_EAX] =
3065 CPUID_6_EAX_ARAT,
3066 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3067 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3068 MSR_VMX_BASIC_TRUE_CTLS,
3069 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3070 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3071 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3072 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3073 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3074 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3075 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3076 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3077 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3078 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3079 .features[FEAT_VMX_EXIT_CTLS] =
3080 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3081 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3082 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3083 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3084 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3085 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3086 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3087 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3088 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3089 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3090 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3091 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3092 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3093 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3094 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3095 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3096 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3097 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3098 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3099 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3100 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3101 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3102 .features[FEAT_VMX_SECONDARY_CTLS] =
3103 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3104 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3105 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3106 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3107 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3108 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3109 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3110 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3111 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3112 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3113 .xlevel = 0x80000008,
3114 .model_id = "Intel Xeon Processor (Cascadelake)",
3115 .versions = (X86CPUVersionDefinition[]) {
3116 { .version = 1 },
3117 { .version = 2,
3118 .props = (PropValue[]) {
3119 { "arch-capabilities", "on" },
3120 { "rdctl-no", "on" },
3121 { "ibrs-all", "on" },
3122 { "skip-l1dfl-vmentry", "on" },
3123 { "mds-no", "on" },
3124 { /* end of list */ }
3125 },
3126 },
3127 { .version = 3,
3128 .alias = "Cascadelake-Server-noTSX",
3129 .props = (PropValue[]) {
3130 { "hle", "off" },
3131 { "rtm", "off" },
3132 { /* end of list */ }
3133 },
3134 },
3135 { /* end of list */ }
3136 }
3137 },
3138 {
3139 .name = "Cooperlake",
3140 .level = 0xd,
3141 .vendor = CPUID_VENDOR_INTEL,
3142 .family = 6,
3143 .model = 85,
3144 .stepping = 10,
3145 .features[FEAT_1_EDX] =
3146 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3147 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3148 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3149 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3150 CPUID_DE | CPUID_FP87,
3151 .features[FEAT_1_ECX] =
3152 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3153 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3154 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3155 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3156 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3157 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3158 .features[FEAT_8000_0001_EDX] =
3159 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3160 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3161 .features[FEAT_8000_0001_ECX] =
3162 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3163 .features[FEAT_7_0_EBX] =
3164 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3165 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3166 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3167 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3168 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3169 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3170 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3171 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3172 .features[FEAT_7_0_ECX] =
3173 CPUID_7_0_ECX_PKU |
3174 CPUID_7_0_ECX_AVX512VNNI,
3175 .features[FEAT_7_0_EDX] =
3176 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP |
3177 CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES,
3178 .features[FEAT_ARCH_CAPABILITIES] =
3179 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL |
3180 MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO |
3181 MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO,
3182 .features[FEAT_7_1_EAX] =
3183 CPUID_7_1_EAX_AVX512_BF16,
3184 /*
3185 * Missing: XSAVES (not supported by some Linux versions,
3186 * including v4.1 to v4.12).
3187 * KVM doesn't yet expose any XSAVES state save component,
3188 * and the only one defined in Skylake (processor tracing)
3189 * probably will block migration anyway.
3190 */
3191 .features[FEAT_XSAVE] =
3192 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3193 CPUID_XSAVE_XGETBV1,
3194 .features[FEAT_6_EAX] =
3195 CPUID_6_EAX_ARAT,
3196 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3197 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3198 MSR_VMX_BASIC_TRUE_CTLS,
3199 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3200 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3201 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3202 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3203 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3204 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3205 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3206 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3207 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3208 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3209 .features[FEAT_VMX_EXIT_CTLS] =
3210 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3211 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3212 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3213 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3214 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3215 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3216 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3217 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3218 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3219 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3220 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3221 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3222 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3223 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3224 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3225 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3226 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3227 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3228 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3229 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3230 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3231 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3232 .features[FEAT_VMX_SECONDARY_CTLS] =
3233 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3234 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3235 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3236 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3237 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3238 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3239 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3240 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3241 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3242 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3243 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3244 .xlevel = 0x80000008,
3245 .model_id = "Intel Xeon Processor (Cooperlake)",
3246 },
3247 {
3248 .name = "Icelake-Client",
3249 .level = 0xd,
3250 .vendor = CPUID_VENDOR_INTEL,
3251 .family = 6,
3252 .model = 126,
3253 .stepping = 0,
3254 .features[FEAT_1_EDX] =
3255 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3256 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3257 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3258 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3259 CPUID_DE | CPUID_FP87,
3260 .features[FEAT_1_ECX] =
3261 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3262 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3263 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3264 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3265 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3266 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3267 .features[FEAT_8000_0001_EDX] =
3268 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
3269 CPUID_EXT2_SYSCALL,
3270 .features[FEAT_8000_0001_ECX] =
3271 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3272 .features[FEAT_8000_0008_EBX] =
3273 CPUID_8000_0008_EBX_WBNOINVD,
3274 .features[FEAT_7_0_EBX] =
3275 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3276 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3277 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3278 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3279 CPUID_7_0_EBX_SMAP,
3280 .features[FEAT_7_0_ECX] =
3281 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
3282 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
3283 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
3284 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
3285 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
3286 .features[FEAT_7_0_EDX] =
3287 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3288 /* Missing: XSAVES (not supported by some Linux versions,
3289 * including v4.1 to v4.12).
3290 * KVM doesn't yet expose any XSAVES state save component,
3291 * and the only one defined in Skylake (processor tracing)
3292 * probably will block migration anyway.
3293 */
3294 .features[FEAT_XSAVE] =
3295 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3296 CPUID_XSAVE_XGETBV1,
3297 .features[FEAT_6_EAX] =
3298 CPUID_6_EAX_ARAT,
3299 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3300 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3301 MSR_VMX_BASIC_TRUE_CTLS,
3302 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3303 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3304 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3305 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3306 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3307 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3308 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3309 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3310 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3311 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3312 .features[FEAT_VMX_EXIT_CTLS] =
3313 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3314 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3315 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3316 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3317 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3318 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3319 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3320 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3321 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3322 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
3323 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3324 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3325 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3326 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3327 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3328 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3329 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3330 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3331 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3332 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3333 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3334 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3335 .features[FEAT_VMX_SECONDARY_CTLS] =
3336 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3337 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3338 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3339 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3340 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3341 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3342 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3343 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3344 .xlevel = 0x80000008,
3345 .model_id = "Intel Core Processor (Icelake)",
3346 .versions = (X86CPUVersionDefinition[]) {
3347 { .version = 1 },
3348 {
3349 .version = 2,
3350 .alias = "Icelake-Client-noTSX",
3351 .props = (PropValue[]) {
3352 { "hle", "off" },
3353 { "rtm", "off" },
3354 { /* end of list */ }
3355 },
3356 },
3357 { /* end of list */ }
3358 }
3359 },
3360 {
3361 .name = "Icelake-Server",
3362 .level = 0xd,
3363 .vendor = CPUID_VENDOR_INTEL,
3364 .family = 6,
3365 .model = 134,
3366 .stepping = 0,
3367 .features[FEAT_1_EDX] =
3368 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3369 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3370 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3371 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3372 CPUID_DE | CPUID_FP87,
3373 .features[FEAT_1_ECX] =
3374 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3375 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3376 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3377 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3378 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3379 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3380 .features[FEAT_8000_0001_EDX] =
3381 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3382 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3383 .features[FEAT_8000_0001_ECX] =
3384 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3385 .features[FEAT_8000_0008_EBX] =
3386 CPUID_8000_0008_EBX_WBNOINVD,
3387 .features[FEAT_7_0_EBX] =
3388 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3389 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3390 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3391 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3392 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3393 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3394 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3395 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3396 .features[FEAT_7_0_ECX] =
3397 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
3398 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
3399 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
3400 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
3401 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
3402 .features[FEAT_7_0_EDX] =
3403 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3404 /* Missing: XSAVES (not supported by some Linux versions,
3405 * including v4.1 to v4.12).
3406 * KVM doesn't yet expose any XSAVES state save component,
3407 * and the only one defined in Skylake (processor tracing)
3408 * probably will block migration anyway.
3409 */
3410 .features[FEAT_XSAVE] =
3411 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3412 CPUID_XSAVE_XGETBV1,
3413 .features[FEAT_6_EAX] =
3414 CPUID_6_EAX_ARAT,
3415 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3416 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3417 MSR_VMX_BASIC_TRUE_CTLS,
3418 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3419 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3420 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3421 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3422 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3423 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3424 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3425 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3426 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3427 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3428 .features[FEAT_VMX_EXIT_CTLS] =
3429 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3430 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3431 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3432 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3433 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3434 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3435 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3436 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3437 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3438 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3439 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3440 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3441 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3442 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3443 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3444 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3445 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3446 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3447 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3448 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3449 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3450 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3451 .features[FEAT_VMX_SECONDARY_CTLS] =
3452 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3453 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3454 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3455 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3456 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3457 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3458 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3459 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3460 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
3461 .xlevel = 0x80000008,
3462 .model_id = "Intel Xeon Processor (Icelake)",
3463 .versions = (X86CPUVersionDefinition[]) {
3464 { .version = 1 },
3465 {
3466 .version = 2,
3467 .alias = "Icelake-Server-noTSX",
3468 .props = (PropValue[]) {
3469 { "hle", "off" },
3470 { "rtm", "off" },
3471 { /* end of list */ }
3472 },
3473 },
3474 { /* end of list */ }
3475 }
3476 },
3477 {
3478 .name = "Denverton",
3479 .level = 21,
3480 .vendor = CPUID_VENDOR_INTEL,
3481 .family = 6,
3482 .model = 95,
3483 .stepping = 1,
3484 .features[FEAT_1_EDX] =
3485 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
3486 CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
3487 CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
3488 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR |
3489 CPUID_SSE | CPUID_SSE2,
3490 .features[FEAT_1_ECX] =
3491 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
3492 CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 |
3493 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
3494 CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER |
3495 CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND,
3496 .features[FEAT_8000_0001_EDX] =
3497 CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB |
3498 CPUID_EXT2_RDTSCP | CPUID_EXT2_LM,
3499 .features[FEAT_8000_0001_ECX] =
3500 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3501 .features[FEAT_7_0_EBX] =
3502 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS |
3503 CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP |
3504 CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI,
3505 .features[FEAT_7_0_EDX] =
3506 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES |
3507 CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3508 /*
3509 * Missing: XSAVES (not supported by some Linux versions,
3510 * including v4.1 to v4.12).
3511 * KVM doesn't yet expose any XSAVES state save component,
3512 * and the only one defined in Skylake (processor tracing)
3513 * probably will block migration anyway.
3514 */
3515 .features[FEAT_XSAVE] =
3516 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1,
3517 .features[FEAT_6_EAX] =
3518 CPUID_6_EAX_ARAT,
3519 .features[FEAT_ARCH_CAPABILITIES] =
3520 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY,
3521 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3522 MSR_VMX_BASIC_TRUE_CTLS,
3523 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3524 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3525 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3526 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3527 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3528 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3529 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3530 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3531 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3532 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3533 .features[FEAT_VMX_EXIT_CTLS] =
3534 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3535 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3536 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3537 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3538 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3539 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3540 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3541 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3542 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3543 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3544 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3545 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3546 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3547 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3548 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3549 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3550 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3551 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3552 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3553 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3554 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3555 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3556 .features[FEAT_VMX_SECONDARY_CTLS] =
3557 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3558 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3559 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3560 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3561 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3562 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3563 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3564 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3565 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3566 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3567 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3568 .xlevel = 0x80000008,
3569 .model_id = "Intel Atom Processor (Denverton)",
3570 .versions = (X86CPUVersionDefinition[]) {
3571 { .version = 1 },
3572 {
3573 .version = 2,
3574 .props = (PropValue[]) {
3575 { "monitor", "off" },
3576 { "mpx", "off" },
3577 { /* end of list */ },
3578 },
3579 },
3580 { /* end of list */ },
3581 },
3582 },
3583 {
3584 .name = "Snowridge",
3585 .level = 27,
3586 .vendor = CPUID_VENDOR_INTEL,
3587 .family = 6,
3588 .model = 134,
3589 .stepping = 1,
3590 .features[FEAT_1_EDX] =
3591 /* missing: CPUID_PN CPUID_IA64 */
3592 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
3593 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE |
3594 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE |
3595 CPUID_CX8 | CPUID_APIC | CPUID_SEP |
3596 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
3597 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH |
3598 CPUID_MMX |
3599 CPUID_FXSR | CPUID_SSE | CPUID_SSE2,
3600 .features[FEAT_1_ECX] =
3601 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
3602 CPUID_EXT_SSSE3 |
3603 CPUID_EXT_CX16 |
3604 CPUID_EXT_SSE41 |
3605 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
3606 CPUID_EXT_POPCNT |
3607 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE |
3608 CPUID_EXT_RDRAND,
3609 .features[FEAT_8000_0001_EDX] =
3610 CPUID_EXT2_SYSCALL |
3611 CPUID_EXT2_NX |
3612 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3613 CPUID_EXT2_LM,
3614 .features[FEAT_8000_0001_ECX] =
3615 CPUID_EXT3_LAHF_LM |
3616 CPUID_EXT3_3DNOWPREFETCH,
3617 .features[FEAT_7_0_EBX] =
3618 CPUID_7_0_EBX_FSGSBASE |
3619 CPUID_7_0_EBX_SMEP |
3620 CPUID_7_0_EBX_ERMS |
3621 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */
3622 CPUID_7_0_EBX_RDSEED |
3623 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3624 CPUID_7_0_EBX_CLWB |
3625 CPUID_7_0_EBX_SHA_NI,
3626 .features[FEAT_7_0_ECX] =
3627 CPUID_7_0_ECX_UMIP |
3628 /* missing bit 5 */
3629 CPUID_7_0_ECX_GFNI |
3630 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE |
3631 CPUID_7_0_ECX_MOVDIR64B,
3632 .features[FEAT_7_0_EDX] =
3633 CPUID_7_0_EDX_SPEC_CTRL |
3634 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD |
3635 CPUID_7_0_EDX_CORE_CAPABILITY,
3636 .features[FEAT_CORE_CAPABILITY] =
3637 MSR_CORE_CAP_SPLIT_LOCK_DETECT,
3638 /*
3639 * Missing: XSAVES (not supported by some Linux versions,
3640 * including v4.1 to v4.12).
3641 * KVM doesn't yet expose any XSAVES state save component,
3642 * and the only one defined in Skylake (processor tracing)
3643 * probably will block migration anyway.
3644 */
3645 .features[FEAT_XSAVE] =
3646 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3647 CPUID_XSAVE_XGETBV1,
3648 .features[FEAT_6_EAX] =
3649 CPUID_6_EAX_ARAT,
3650 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3651 MSR_VMX_BASIC_TRUE_CTLS,
3652 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3653 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3654 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3655 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3656 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3657 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3658 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3659 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3660 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3661 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3662 .features[FEAT_VMX_EXIT_CTLS] =
3663 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3664 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3665 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3666 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3667 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3668 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3669 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3670 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3671 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3672 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3673 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3674 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3675 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3676 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3677 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3678 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3679 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3680 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3681 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3682 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3683 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3684 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3685 .features[FEAT_VMX_SECONDARY_CTLS] =
3686 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3687 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3688 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3689 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3690 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3691 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3692 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3693 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3694 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3695 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3696 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3697 .xlevel = 0x80000008,
3698 .model_id = "Intel Atom Processor (SnowRidge)",
3699 .versions = (X86CPUVersionDefinition[]) {
3700 { .version = 1 },
3701 {
3702 .version = 2,
3703 .props = (PropValue[]) {
3704 { "mpx", "off" },
3705 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" },
3706 { /* end of list */ },
3707 },
3708 },
3709 { /* end of list */ },
3710 },
3711 },
3712 {
3713 .name = "KnightsMill",
3714 .level = 0xd,
3715 .vendor = CPUID_VENDOR_INTEL,
3716 .family = 6,
3717 .model = 133,
3718 .stepping = 0,
3719 .features[FEAT_1_EDX] =
3720 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
3721 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
3722 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
3723 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
3724 CPUID_PSE | CPUID_DE | CPUID_FP87,
3725 .features[FEAT_1_ECX] =
3726 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3727 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3728 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3729 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3730 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3731 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3732 .features[FEAT_8000_0001_EDX] =
3733 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3734 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3735 .features[FEAT_8000_0001_ECX] =
3736 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3737 .features[FEAT_7_0_EBX] =
3738 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3739 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
3740 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
3741 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
3742 CPUID_7_0_EBX_AVX512ER,
3743 .features[FEAT_7_0_ECX] =
3744 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
3745 .features[FEAT_7_0_EDX] =
3746 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
3747 .features[FEAT_XSAVE] =
3748 CPUID_XSAVE_XSAVEOPT,
3749 .features[FEAT_6_EAX] =
3750 CPUID_6_EAX_ARAT,
3751 .xlevel = 0x80000008,
3752 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
3753 },
3754 {
3755 .name = "Opteron_G1",
3756 .level = 5,
3757 .vendor = CPUID_VENDOR_AMD,
3758 .family = 15,
3759 .model = 6,
3760 .stepping = 1,
3761 .features[FEAT_1_EDX] =
3762 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3763 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3764 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3765 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3766 CPUID_DE | CPUID_FP87,
3767 .features[FEAT_1_ECX] =
3768 CPUID_EXT_SSE3,
3769 .features[FEAT_8000_0001_EDX] =
3770 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3771 .xlevel = 0x80000008,
3772 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
3773 },
3774 {
3775 .name = "Opteron_G2",
3776 .level = 5,
3777 .vendor = CPUID_VENDOR_AMD,
3778 .family = 15,
3779 .model = 6,
3780 .stepping = 1,
3781 .features[FEAT_1_EDX] =
3782 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3783 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3784 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3785 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3786 CPUID_DE | CPUID_FP87,
3787 .features[FEAT_1_ECX] =
3788 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
3789 .features[FEAT_8000_0001_EDX] =
3790 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3791 .features[FEAT_8000_0001_ECX] =
3792 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
3793 .xlevel = 0x80000008,
3794 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
3795 },
3796 {
3797 .name = "Opteron_G3",
3798 .level = 5,
3799 .vendor = CPUID_VENDOR_AMD,
3800 .family = 16,
3801 .model = 2,
3802 .stepping = 3,
3803 .features[FEAT_1_EDX] =
3804 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3805 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3806 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3807 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3808 CPUID_DE | CPUID_FP87,
3809 .features[FEAT_1_ECX] =
3810 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
3811 CPUID_EXT_SSE3,
3812 .features[FEAT_8000_0001_EDX] =
3813 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
3814 CPUID_EXT2_RDTSCP,
3815 .features[FEAT_8000_0001_ECX] =
3816 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
3817 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
3818 .xlevel = 0x80000008,
3819 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
3820 },
3821 {
3822 .name = "Opteron_G4",
3823 .level = 0xd,
3824 .vendor = CPUID_VENDOR_AMD,
3825 .family = 21,
3826 .model = 1,
3827 .stepping = 2,
3828 .features[FEAT_1_EDX] =
3829 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3830 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3831 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3832 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3833 CPUID_DE | CPUID_FP87,
3834 .features[FEAT_1_ECX] =
3835 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3836 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3837 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
3838 CPUID_EXT_SSE3,
3839 .features[FEAT_8000_0001_EDX] =
3840 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
3841 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
3842 .features[FEAT_8000_0001_ECX] =
3843 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
3844 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
3845 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
3846 CPUID_EXT3_LAHF_LM,
3847 .features[FEAT_SVM] =
3848 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3849 /* no xsaveopt! */
3850 .xlevel = 0x8000001A,
3851 .model_id = "AMD Opteron 62xx class CPU",
3852 },
3853 {
3854 .name = "Opteron_G5",
3855 .level = 0xd,
3856 .vendor = CPUID_VENDOR_AMD,
3857 .family = 21,
3858 .model = 2,
3859 .stepping = 0,
3860 .features[FEAT_1_EDX] =
3861 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3862 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3863 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3864 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3865 CPUID_DE | CPUID_FP87,
3866 .features[FEAT_1_ECX] =
3867 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
3868 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
3869 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
3870 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
3871 .features[FEAT_8000_0001_EDX] =
3872 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
3873 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
3874 .features[FEAT_8000_0001_ECX] =
3875 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
3876 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
3877 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
3878 CPUID_EXT3_LAHF_LM,
3879 .features[FEAT_SVM] =
3880 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3881 /* no xsaveopt! */
3882 .xlevel = 0x8000001A,
3883 .model_id = "AMD Opteron 63xx class CPU",
3884 },
3885 {
3886 .name = "EPYC",
3887 .level = 0xd,
3888 .vendor = CPUID_VENDOR_AMD,
3889 .family = 23,
3890 .model = 1,
3891 .stepping = 2,
3892 .features[FEAT_1_EDX] =
3893 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3894 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
3895 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
3896 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
3897 CPUID_VME | CPUID_FP87,
3898 .features[FEAT_1_ECX] =
3899 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
3900 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
3901 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3902 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3903 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
3904 .features[FEAT_8000_0001_EDX] =
3905 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3906 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3907 CPUID_EXT2_SYSCALL,
3908 .features[FEAT_8000_0001_ECX] =
3909 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3910 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3911 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3912 CPUID_EXT3_TOPOEXT,
3913 .features[FEAT_7_0_EBX] =
3914 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3915 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3916 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3917 CPUID_7_0_EBX_SHA_NI,
3918 .features[FEAT_XSAVE] =
3919 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3920 CPUID_XSAVE_XGETBV1,
3921 .features[FEAT_6_EAX] =
3922 CPUID_6_EAX_ARAT,
3923 .features[FEAT_SVM] =
3924 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3925 .xlevel = 0x8000001E,
3926 .model_id = "AMD EPYC Processor",
3927 .cache_info = &epyc_cache_info,
3928 .use_epyc_apic_id_encoding = 1,
3929 .versions = (X86CPUVersionDefinition[]) {
3930 { .version = 1 },
3931 {
3932 .version = 2,
3933 .alias = "EPYC-IBPB",
3934 .props = (PropValue[]) {
3935 { "ibpb", "on" },
3936 { "model-id",
3937 "AMD EPYC Processor (with IBPB)" },
3938 { /* end of list */ }
3939 }
3940 },
3941 {
3942 .version = 3,
3943 .props = (PropValue[]) {
3944 { "ibpb", "on" },
3945 { "perfctr-core", "on" },
3946 { "clzero", "on" },
3947 { "xsaveerptr", "on" },
3948 { "xsaves", "on" },
3949 { "model-id",
3950 "AMD EPYC Processor" },
3951 { /* end of list */ }
3952 }
3953 },
3954 { /* end of list */ }
3955 }
3956 },
3957 {
3958 .name = "Dhyana",
3959 .level = 0xd,
3960 .vendor = CPUID_VENDOR_HYGON,
3961 .family = 24,
3962 .model = 0,
3963 .stepping = 1,
3964 .features[FEAT_1_EDX] =
3965 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3966 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
3967 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
3968 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
3969 CPUID_VME | CPUID_FP87,
3970 .features[FEAT_1_ECX] =
3971 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
3972 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
3973 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3974 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3975 CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
3976 .features[FEAT_8000_0001_EDX] =
3977 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3978 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3979 CPUID_EXT2_SYSCALL,
3980 .features[FEAT_8000_0001_ECX] =
3981 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3982 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3983 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3984 CPUID_EXT3_TOPOEXT,
3985 .features[FEAT_8000_0008_EBX] =
3986 CPUID_8000_0008_EBX_IBPB,
3987 .features[FEAT_7_0_EBX] =
3988 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3989 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3990 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
3991 /*
3992 * Missing: XSAVES (not supported by some Linux versions,
3993 * including v4.1 to v4.12).
3994 * KVM doesn't yet expose any XSAVES state save component.
3995 */
3996 .features[FEAT_XSAVE] =
3997 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3998 CPUID_XSAVE_XGETBV1,
3999 .features[FEAT_6_EAX] =
4000 CPUID_6_EAX_ARAT,
4001 .features[FEAT_SVM] =
4002 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
4003 .xlevel = 0x8000001E,
4004 .model_id = "Hygon Dhyana Processor",
4005 .cache_info = &epyc_cache_info,
4006 },
4007 {
4008 .name = "EPYC-Rome",
4009 .level = 0xd,
4010 .vendor = CPUID_VENDOR_AMD,
4011 .family = 23,
4012 .model = 49,
4013 .stepping = 0,
4014 .features[FEAT_1_EDX] =
4015 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
4016 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
4017 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
4018 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
4019 CPUID_VME | CPUID_FP87,
4020 .features[FEAT_1_ECX] =
4021 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
4022 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
4023 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
4024 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
4025 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
4026 .features[FEAT_8000_0001_EDX] =
4027 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
4028 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
4029 CPUID_EXT2_SYSCALL,
4030 .features[FEAT_8000_0001_ECX] =
4031 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
4032 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
4033 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
4034 CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE,
4035 .features[FEAT_8000_0008_EBX] =
4036 CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR |
4037 CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB |
4038 CPUID_8000_0008_EBX_STIBP,
4039 .features[FEAT_7_0_EBX] =
4040 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
4041 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
4042 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
4043 CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB,
4044 .features[FEAT_7_0_ECX] =
4045 CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID,
4046 .features[FEAT_XSAVE] =
4047 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
4048 CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES,
4049 .features[FEAT_6_EAX] =
4050 CPUID_6_EAX_ARAT,
4051 .features[FEAT_SVM] =
4052 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
4053 .xlevel = 0x8000001E,
4054 .model_id = "AMD EPYC-Rome Processor",
4055 .cache_info = &epyc_rome_cache_info,
4056 .use_epyc_apic_id_encoding = 1,
4057 },
4058 };
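/*
 * Rough usage sketch (assuming the usual "<name>-vN" and alias type
 * registration done elsewhere in this file): the definitions above become
 * selectable CPU models, and versioned entries can be picked explicitly,
 * e.g.
 *
 *   -cpu Cascadelake-Server-v3
 *   -cpu Cascadelake-Server-noTSX        (alias of that same version)
 *   -cpu EPYC,ibpb=on                    (per-property override)
 */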
4059
4060 /* KVM-specific features that are automatically added/removed
4061 * from all CPU models when KVM is enabled.
4062 */
4063 static PropValue kvm_default_props[] = {
4064 { "kvmclock", "on" },
4065 { "kvm-nopiodelay", "on" },
4066 { "kvm-asyncpf", "on" },
4067 { "kvm-steal-time", "on" },
4068 { "kvm-pv-eoi", "on" },
4069 { "kvmclock-stable-bit", "on" },
4070 { "x2apic", "on" },
4071 { "acpi", "off" },
4072 { "monitor", "off" },
4073 { "svm", "off" },
4074 { NULL, NULL },
4075 };
4076
4077 /* TCG-specific defaults that override all CPU models when using TCG
4078 */
4079 static PropValue tcg_default_props[] = {
4080 { "vme", "off" },
4081 { NULL, NULL },
4082 };
4083
4084
4085 /*
4086 * We resolve CPU model aliases using -v1 when using "-machine
4087 * none", but this is just for compatibility while libvirt isn't
4088 * adapted to resolve CPU model versions before creating VMs.
4089 * See "Runnability guarantee of CPU models" at * qemu-deprecated.texi.
4090 */
4091 X86CPUVersion default_cpu_version = 1;
4092
4093 void x86_cpu_set_default_version(X86CPUVersion version)
4094 {
4095 /* The default must be a concrete version: CPU_VERSION_AUTO cannot resolve to itself */
4096 assert(version != CPU_VERSION_AUTO);
4097 default_cpu_version = version;
4098 }
4099
4100 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model)
4101 {
4102 int v = 0;
4103 const X86CPUVersionDefinition *vdef =
4104 x86_cpu_def_get_versions(model->cpudef);
4105 while (vdef->version) {
4106 v = vdef->version;
4107 vdef++;
4108 }
4109 return v;
4110 }
4111
4112 /* Return the actual version being used for a specific CPU model */
4113 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model)
4114 {
4115 X86CPUVersion v = model->version;
4116 if (v == CPU_VERSION_AUTO) {
4117 v = default_cpu_version;
4118 }
4119 if (v == CPU_VERSION_LATEST) {
4120 return x86_cpu_model_last_version(model);
4121 }
4122 return v;
4123 }
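/*
 * Worked example, based on the tables above: with default_cpu_version == 1,
 * a plain "Cascadelake-Server" (CPU_VERSION_AUTO) resolves to version 1,
 * while CPU_VERSION_LATEST walks the zero-terminated versions array and
 * resolves to version 3 for that model.
 */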
4124
4125 void x86_cpu_change_kvm_default(const char *prop, const char *value)
4126 {
4127 PropValue *pv;
4128 for (pv = kvm_default_props; pv->prop; pv++) {
4129 if (!strcmp(pv->prop, prop)) {
4130 pv->value = value;
4131 break;
4132 }
4133 }
4134
4135 /* It is valid to call this function only for properties that
4136 * are already present in the kvm_default_props table.
4137 */
4138 assert(pv->prop);
4139 }
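/*
 * Hypothetical usage sketch: machine compat code can adjust one of the
 * defaults above, e.g. x86_cpu_change_kvm_default("x2apic", "off");
 * passing a property that is not already listed in kvm_default_props
 * trips the assert() above.
 */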
4140
4141 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
4142 bool migratable_only);
4143
4144 static bool lmce_supported(void)
4145 {
4146 uint64_t mce_cap = 0;
4147
4148 #ifdef CONFIG_KVM
4149 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
4150 return false;
4151 }
4152 #endif
4153
4154 return !!(mce_cap & MCG_LMCE_P);
4155 }
4156
4157 #define CPUID_MODEL_ID_SZ 48
4158
4159 /**
4160 * cpu_x86_fill_model_id:
4161 * Get CPUID model ID string from host CPU.
4162 *
4163 * @str should have at least CPUID_MODEL_ID_SZ bytes
4164 *
4165 * The function does NOT add a null terminator to the string
4166 * automatically.
4167 */
4168 static int cpu_x86_fill_model_id(char *str)
4169 {
4170 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
4171 int i;
4172
4173 for (i = 0; i < 3; i++) {
4174 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
4175 memcpy(str + i * 16 + 0, &eax, 4);
4176 memcpy(str + i * 16 + 4, &ebx, 4);
4177 memcpy(str + i * 16 + 8, &ecx, 4);
4178 memcpy(str + i * 16 + 12, &edx, 4);
4179 }
4180 return 0;
4181 }
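/*
 * Each of CPUID leaves 0x80000002..0x80000004 returns 16 bytes of the
 * processor brand string in EAX/EBX/ECX/EDX, so the loop above fills
 * exactly 48 bytes: iteration i covers str[i * 16] .. str[i * 16 + 15].
 */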
4182
4183 static Property max_x86_cpu_properties[] = {
4184 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
4185 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
4186 DEFINE_PROP_END_OF_LIST()
4187 };
4188
4189 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
4190 {
4191 DeviceClass *dc = DEVICE_CLASS(oc);
4192 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4193
4194 xcc->ordering = 9;
4195
4196 xcc->model_description =
4197 "Enables all features supported by the accelerator in the current host";
4198
4199 device_class_set_props(dc, max_x86_cpu_properties);
4200 }
4201
4202 static void max_x86_cpu_initfn(Object *obj)
4203 {
4204 X86CPU *cpu = X86_CPU(obj);
4205 CPUX86State *env = &cpu->env;
4206 KVMState *s = kvm_state;
4207
4208 /* We can't fill the features array here because we don't know yet if
4209 * "migratable" is true or false.
4210 */
4211 cpu->max_features = true;
4212
4213 if (accel_uses_host_cpuid()) {
4214 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
4215 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
4216 int family, model, stepping;
4217
4218 host_vendor_fms(vendor, &family, &model, &stepping);
4219 cpu_x86_fill_model_id(model_id);
4220
4221 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
4222 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
4223 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
4224 object_property_set_int(OBJECT(cpu), stepping, "stepping",
4225 &error_abort);
4226 object_property_set_str(OBJECT(cpu), model_id, "model-id",
4227 &error_abort);
4228
4229 if (kvm_enabled()) {
4230 env->cpuid_min_level =
4231 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
4232 env->cpuid_min_xlevel =
4233 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
4234 env->cpuid_min_xlevel2 =
4235 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
4236 } else {
4237 env->cpuid_min_level =
4238 hvf_get_supported_cpuid(0x0, 0, R_EAX);
4239 env->cpuid_min_xlevel =
4240 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
4241 env->cpuid_min_xlevel2 =
4242 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
4243 }
4244
4245 if (lmce_supported()) {
4246 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
4247 }
4248 } else {
4249 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
4250 "vendor", &error_abort);
4251 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
4252 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
4253 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
4254 object_property_set_str(OBJECT(cpu),
4255 "QEMU TCG CPU version " QEMU_HW_VERSION,
4256 "model-id", &error_abort);
4257 }
4258
4259 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
4260 }
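/*
 * In short: with a host-CPUID accelerator (KVM or HVF) the "max" model
 * mirrors the host's vendor/family/model/stepping and minimum CPUID
 * levels; under TCG it falls back to the generic identification set
 * above ("QEMU TCG CPU version ...").
 */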
4261
4262 static const TypeInfo max_x86_cpu_type_info = {
4263 .name = X86_CPU_TYPE_NAME("max"),
4264 .parent = TYPE_X86_CPU,
4265 .instance_init = max_x86_cpu_initfn,
4266 .class_init = max_x86_cpu_class_init,
4267 };
4268
4269 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
4270 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
4271 {
4272 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4273
4274 xcc->host_cpuid_required = true;
4275 xcc->ordering = 8;
4276
4277 #if defined(CONFIG_KVM)
4278 xcc->model_description =
4279 "KVM processor with all supported host features ";
4280 #elif defined(CONFIG_HVF)
4281 xcc->model_description =
4282 "HVF processor with all supported host features ";
4283 #endif
4284 }
4285
4286 static const TypeInfo host_x86_cpu_type_info = {
4287 .name = X86_CPU_TYPE_NAME("host"),
4288 .parent = X86_CPU_TYPE_NAME("max"),
4289 .class_init = host_x86_cpu_class_init,
4290 };
4291
4292 #endif
4293
4294 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
4295 {
4296 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
4297
4298 switch (f->type) {
4299 case CPUID_FEATURE_WORD:
4300 {
4301 const char *reg = get_register_name_32(f->cpuid.reg);
4302 assert(reg);
4303 return g_strdup_printf("CPUID.%02XH:%s",
4304 f->cpuid.eax, reg);
4305 }
4306 case MSR_FEATURE_WORD:
4307 return g_strdup_printf("MSR(%02XH)",
4308 f->msr.index);
4309 }
4310
4311 return NULL;
4312 }
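/*
 * Example output (following the format strings above): a CPUID-based
 * word such as FEAT_7_0_EBX is rendered as "CPUID.07H:EBX", while an
 * MSR-based word like FEAT_ARCH_CAPABILITIES would appear as
 * "MSR(10AH)" (assuming the IA32_ARCH_CAPABILITIES index 0x10A).
 */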
4313
4314 static bool x86_cpu_have_filtered_features(X86CPU *cpu)
4315 {
4316 FeatureWord w;
4317
4318 for (w = 0; w < FEATURE_WORDS; w++) {
4319 if (cpu->filtered_features[w]) {
4320 return true;
4321 }
4322 }
4323
4324 return false;
4325 }
4326
4327 static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
4328 const char *verbose_prefix)
4329 {
4330 CPUX86State *env = &cpu->env;
4331 FeatureWordInfo *f = &feature_word_info[w];
4332 int i;
4333
4334 if (!cpu->force_features) {
4335 env->features[w] &= ~mask;
4336 }
4337 cpu->filtered_features[w] |= mask;
4338
4339 if (!verbose_prefix) {
4340 return;
4341 }
4342
4343 for (i = 0; i < 64; ++i) {
4344 if ((1ULL << i) & mask) {
4345 g_autofree char *feat_word_str = feature_word_description(f, i);
4346 warn_report("%s: %s%s%s [bit %d]",
4347 verbose_prefix,
4348 feat_word_str,
4349 f->feat_names[i] ? "." : "",
4350 f->feat_names[i] ? f->feat_names[i] : "", i);
4351 }
4352 }
4353 }
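/*
 * Illustrative warning line produced by the loop above (the prefix text
 * is whatever the caller passes as verbose_prefix), e.g.:
 *   "host doesn't support requested feature: CPUID.07H:EBX.hle [bit 4]"
 */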
4354
4355 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
4356 const char *name, void *opaque,
4357 Error **errp)
4358 {
4359 X86CPU *cpu = X86_CPU(obj);
4360 CPUX86State *env = &cpu->env;
4361 int64_t value;
4362
4363 value = (env->cpuid_version >> 8) & 0xf;
4364 if (value == 0xf) {
4365 value += (env->cpuid_version >> 20) & 0xff;
4366 }
4367 visit_type_int(v, name, &value, errp);
4368 }
4369
4370 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
4371 const char *name, void *opaque,
4372 Error **errp)
4373 {
4374 X86CPU *cpu = X86_CPU(obj);
4375 CPUX86State *env = &cpu->env;
4376 const int64_t min = 0;
4377 const int64_t max = 0xff + 0xf;
4378 Error *local_err = NULL;
4379 int64_t value;
4380
4381 visit_type_int(v, name, &value, &local_err);
4382 if (local_err) {
4383 error_propagate(errp, local_err);
4384 return;
4385 }
4386 if (value < min || value > max) {
4387 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4388 name ? name : "null", value, min, max);
4389 return;
4390 }
4391
4392 env->cpuid_version &= ~0xff00f00;
4393 if (value > 0x0f) {
4394 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
4395 } else {
4396 env->cpuid_version |= value << 8;
4397 }
4398 }
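/*
 * Worked example of the encoding above: "family" 23 (e.g. the EPYC
 * definitions earlier in this file) exceeds 0x0f, so the base family
 * field is set to 0xf and the extended family field to 23 - 15 = 8,
 * i.e. cpuid_version gets (0xf << 8) | (0x8 << 20) = 0x00800f00.
 */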
4399
4400 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
4401 const char *name, void *opaque,
4402 Error **errp)
4403 {
4404 X86CPU *cpu = X86_CPU(obj);
4405 CPUX86State *env = &cpu->env;
4406 int64_t value;
4407
4408 value = (env->cpuid_version >> 4) & 0xf;
4409 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
4410 visit_type_int(v, name, &value, errp);
4411 }
4412
4413 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
4414 const char *name, void *opaque,
4415 Error **errp)
4416 {
4417 X86CPU *cpu = X86_CPU(obj);
4418 CPUX86State *env = &cpu->env;
4419 const int64_t min = 0;
4420 const int64_t max = 0xff;
4421 Error *local_err = NULL;
4422 int64_t value;
4423
4424 visit_type_int(v, name, &value, &local_err);
4425 if (local_err) {
4426 error_propagate(errp, local_err);
4427 return;
4428 }
4429 if (value < min || value > max) {
4430 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4431 name ? name : "null", value, min, max);
4432 return;
4433 }
4434
4435 env->cpuid_version &= ~0xf00f0;
4436 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
4437 }
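/*
 * Worked example: "model" 85 (0x55, Cascadelake/Cooperlake above) splits
 * into a low nibble of 5 in bits 4..7 and an extended model nibble of 5
 * in bits 16..19, which the getter above reassembles as 0x55.
 */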
4438
4439 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
4440 const char *name, void *opaque,
4441 Error **errp)
4442 {
4443 X86CPU *cpu = X86_CPU(obj);
4444 CPUX86State *env = &cpu->env;
4445 int64_t value;
4446
4447 value = env->cpuid_version & 0xf;
4448 visit_type_int(v, name, &value, errp);
4449 }
4450
4451 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
4452 const char *name, void *opaque,
4453 Error **errp)
4454 {
4455 X86CPU *cpu = X86_CPU(obj);
4456 CPUX86State *env = &cpu->env;
4457 const int64_t min = 0;
4458 const int64_t max = 0xf;
4459 Error *local_err = NULL;
4460 int64_t value;
4461
4462 visit_type_int(v, name, &value, &local_err);
4463 if (local_err) {
4464 error_propagate(errp, local_err);
4465 return;
4466 }
4467 if (value < min || value > max) {
4468 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4469 name ? name : "null", value, min, max);
4470 return;
4471 }
4472
4473 env->cpuid_version &= ~0xf;
4474 env->cpuid_version |= value & 0xf;
4475 }
4476
4477 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
4478 {
4479 X86CPU *cpu = X86_CPU(obj);
4480 CPUX86State *env = &cpu->env;
4481 char *value;
4482
4483 value = g_malloc(CPUID_VENDOR_SZ + 1);
4484 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
4485 env->cpuid_vendor3);
4486 return value;
4487 }
4488
4489 static void x86_cpuid_set_vendor(Object *obj, const char *value,
4490 Error **errp)
4491 {
4492 X86CPU *cpu = X86_CPU(obj);
4493 CPUX86State *env = &cpu->env;
4494 int i;
4495
4496 if (strlen(value) != CPUID_VENDOR_SZ) {
4497 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
4498 return;
4499 }
4500
4501 env->cpuid_vendor1 = 0;
4502 env->cpuid_vendor2 = 0;
4503 env->cpuid_vendor3 = 0;
4504 for (i = 0; i < 4; i++) {
4505 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
4506 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
4507 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
4508 }
4509 }
4510
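/*
 * "model-id" is the 48-byte processor brand string that the guest reads
 * via CPUID leaves 0x80000002..0x80000004 (see cpu_x86_cpuid() below);
 * shorter strings are NUL-padded by the setter.
 */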
4511 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
4512 {
4513 X86CPU *cpu = X86_CPU(obj);
4514 CPUX86State *env = &cpu->env;
4515 char *value;
4516 int i;
4517
4518 value = g_malloc(48 + 1);
4519 for (i = 0; i < 48; i++) {
4520 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
4521 }
4522 value[48] = '\0';
4523 return value;
4524 }
4525
4526 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
4527 Error **errp)
4528 {
4529 X86CPU *cpu = X86_CPU(obj);
4530 CPUX86State *env = &cpu->env;
4531 int c, len, i;
4532
4533 if (model_id == NULL) {
4534 model_id = "";
4535 }
4536 len = strlen(model_id);
4537 memset(env->cpuid_model, 0, 48);
4538 for (i = 0; i < 48; i++) {
4539 if (i >= len) {
4540 c = '\0';
4541 } else {
4542 c = (uint8_t)model_id[i];
4543 }
4544 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
4545 }
4546 }
4547
4548 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
4549 void *opaque, Error **errp)
4550 {
4551 X86CPU *cpu = X86_CPU(obj);
4552 int64_t value;
4553
4554 value = cpu->env.tsc_khz * 1000;
4555 visit_type_int(v, name, &value, errp);
4556 }
4557
4558 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
4559 void *opaque, Error **errp)
4560 {
4561 X86CPU *cpu = X86_CPU(obj);
4562 const int64_t min = 0;
4563 const int64_t max = INT64_MAX;
4564 Error *local_err = NULL;
4565 int64_t value;
4566
4567 visit_type_int(v, name, &value, &local_err);
4568 if (local_err) {
4569 error_propagate(errp, local_err);
4570 return;
4571 }
4572 if (value < min || value > max) {
4573 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4574 name ? name : "null", value, min, max);
4575 return;
4576 }
4577
4578 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
4579 }
4580
4581 /* Generic getter for "feature-words" and "filtered-features" properties */
4582 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
4583 const char *name, void *opaque,
4584 Error **errp)
4585 {
4586 uint64_t *array = (uint64_t *)opaque;
4587 FeatureWord w;
4588 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
4589 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
4590 X86CPUFeatureWordInfoList *list = NULL;
4591
4592 for (w = 0; w < FEATURE_WORDS; w++) {
4593 FeatureWordInfo *wi = &feature_word_info[w];
4594 /*
4595 * We didn't have MSR features when "feature-words" was
4596 * introduced. Therefore, skip entries of other types.
4597 */
4598 if (wi->type != CPUID_FEATURE_WORD) {
4599 continue;
4600 }
4601 X86CPUFeatureWordInfo *qwi = &word_infos[w];
4602 qwi->cpuid_input_eax = wi->cpuid.eax;
4603 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
4604 qwi->cpuid_input_ecx = wi->cpuid.ecx;
4605 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
4606 qwi->features = array[w];
4607
4608 /* List will be in reverse order, but order shouldn't matter */
4609 list_entries[w].next = list;
4610 list_entries[w].value = &word_infos[w];
4611 list = &list_entries[w];
4612 }
4613
4614 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
4615 }
4616
4617 /* Convert all '_' in a feature string option name to '-', to make the feature
4618 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
4619 */
4620 static inline void feat2prop(char *s)
4621 {
4622 while ((s = strchr(s, '_'))) {
4623 *s = '-';
4624 }
4625 }
4626
4627 /* Return the feature property name for a feature flag bit */
4628 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
4629 {
4630 const char *name;
4631 /* XSAVE components are automatically enabled by other features,
4632 * so return the original feature name instead
4633 */
4634 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
4635 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
4636
4637 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
4638 x86_ext_save_areas[comp].bits) {
4639 w = x86_ext_save_areas[comp].feature;
4640 bitnr = ctz32(x86_ext_save_areas[comp].bits);
4641 }
4642 }
4643
4644 assert(bitnr < 64);
4645 assert(w < FEATURE_WORDS);
4646 name = feature_word_info[w].feat_names[bitnr];
4647 assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD));
4648 return name;
4649 }
4650
4651 /* Compatibility hack to maintain the legacy +-feat semantics,
4652 * where +-feat overwrites any feature set by
4653 * feat=on|feat even if the latter is parsed after +-feat
4654 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
4655 */
4656 static GList *plus_features, *minus_features;
4657
4658 static gint compare_string(gconstpointer a, gconstpointer b)
4659 {
4660 return g_strcmp0(a, b);
4661 }
4662
4663 /* Parse "+feature,-feature,feature=foo" CPU feature string
4664 */
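/*
 * For example (illustrative only): "-cpu qemu64,+avx2,-vmx,tsc-freq=2G"
 * records avx2 in plus_features, vmx in minus_features, and registers a
 * "tsc-frequency=2000000000" global property for the given CPU type.
 */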
4665 static void x86_cpu_parse_featurestr(const char *typename, char *features,
4666 Error **errp)
4667 {
4668 char *featurestr; /* Single 'key=value' string being parsed */
4669 static bool cpu_globals_initialized;
4670 bool ambiguous = false;
4671
4672 if (cpu_globals_initialized) {
4673 return;
4674 }
4675 cpu_globals_initialized = true;
4676
4677 if (!features) {
4678 return;
4679 }
4680
4681 for (featurestr = strtok(features, ",");
4682 featurestr;
4683 featurestr = strtok(NULL, ",")) {
4684 const char *name;
4685 const char *val = NULL;
4686 char *eq = NULL;
4687 char num[32];
4688 GlobalProperty *prop;
4689
4690 /* Compatibility syntax: */
4691 if (featurestr[0] == '+') {
4692 plus_features = g_list_append(plus_features,
4693 g_strdup(featurestr + 1));
4694 continue;
4695 } else if (featurestr[0] == '-') {
4696 minus_features = g_list_append(minus_features,
4697 g_strdup(featurestr + 1));
4698 continue;
4699 }
4700
4701 eq = strchr(featurestr, '=');
4702 if (eq) {
4703 *eq++ = 0;
4704 val = eq;
4705 } else {
4706 val = "on";
4707 }
4708
4709 feat2prop(featurestr);
4710 name = featurestr;
4711
4712 if (g_list_find_custom(plus_features, name, compare_string)) {
4713 warn_report("Ambiguous CPU model string. "
4714 "Don't mix both \"+%s\" and \"%s=%s\"",
4715 name, name, val);
4716 ambiguous = true;
4717 }
4718 if (g_list_find_custom(minus_features, name, compare_string)) {
4719 warn_report("Ambiguous CPU model string. "
4720 "Don't mix both \"-%s\" and \"%s=%s\"",
4721 name, name, val);
4722 ambiguous = true;
4723 }
4724
4725 /* Special case: */
4726 if (!strcmp(name, "tsc-freq")) {
4727 int ret;
4728 uint64_t tsc_freq;
4729
4730 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
4731 if (ret < 0 || tsc_freq > INT64_MAX) {
4732 error_setg(errp, "bad numerical value %s", val);
4733 return;
4734 }
4735 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
4736 val = num;
4737 name = "tsc-frequency";
4738 }
4739
4740 prop = g_new0(typeof(*prop), 1);
4741 prop->driver = typename;
4742 prop->property = g_strdup(name);
4743 prop->value = g_strdup(val);
4744 qdev_prop_register_global(prop);
4745 }
4746
4747 if (ambiguous) {
4748 warn_report("Compatibility of ambiguous CPU model "
4749 "strings won't be kept on future QEMU versions");
4750 }
4751 }
4752
4753 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
4754 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose);
4755
4756 /* Build a list with the name of all features on a feature word array */
4757 static void x86_cpu_list_feature_names(FeatureWordArray features,
4758 strList **feat_names)
4759 {
4760 FeatureWord w;
4761 strList **next = feat_names;
4762
4763 for (w = 0; w < FEATURE_WORDS; w++) {
4764 uint64_t filtered = features[w];
4765 int i;
4766 for (i = 0; i < 64; i++) {
4767 if (filtered & (1ULL << i)) {
4768 strList *new = g_new0(strList, 1);
4769 new->value = g_strdup(x86_cpu_feature_name(w, i));
4770 *next = new;
4771 next = &new->next;
4772 }
4773 }
4774 }
4775 }
4776
4777 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
4778 const char *name, void *opaque,
4779 Error **errp)
4780 {
4781 X86CPU *xc = X86_CPU(obj);
4782 strList *result = NULL;
4783
4784 x86_cpu_list_feature_names(xc->filtered_features, &result);
4785 visit_type_strList(v, "unavailable-features", &result, errp);
4786 }
4787
4788 /* Check for missing features that may prevent the CPU class from
4789 * running using the current machine and accelerator.
4790 */
4791 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
4792 strList **missing_feats)
4793 {
4794 X86CPU *xc;
4795 Error *err = NULL;
4796 strList **next = missing_feats;
4797
4798 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4799 strList *new = g_new0(strList, 1);
4800 new->value = g_strdup("kvm");
4801 *missing_feats = new;
4802 return;
4803 }
4804
4805 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
4806
4807 x86_cpu_expand_features(xc, &err);
4808 if (err) {
4809 /* Errors at x86_cpu_expand_features should never happen,
4810 * but in case they do, just report the model as not
4811 * runnable at all using the "type" property.
4812 */
4813 strList *new = g_new0(strList, 1);
4814 new->value = g_strdup("type");
4815 *next = new;
4816 next = &new->next;
4817 }
4818
4819 x86_cpu_filter_features(xc, false);
4820
4821 x86_cpu_list_feature_names(xc->filtered_features, next);
4822
4823 object_unref(OBJECT(xc));
4824 }
4825
4826 /* Print all CPUID feature names in the feature set
4827 */
4828 static void listflags(GList *features)
4829 {
4830 size_t len = 0;
4831 GList *tmp;
4832
4833 for (tmp = features; tmp; tmp = tmp->next) {
4834 const char *name = tmp->data;
4835 if ((len + strlen(name) + 1) >= 75) {
4836 qemu_printf("\n");
4837 len = 0;
4838 }
4839 qemu_printf("%s%s", len == 0 ? " " : " ", name);
4840 len += strlen(name) + 1;
4841 }
4842 qemu_printf("\n");
4843 }
4844
4845 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
4846 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
4847 {
4848 ObjectClass *class_a = (ObjectClass *)a;
4849 ObjectClass *class_b = (ObjectClass *)b;
4850 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
4851 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
4852 int ret;
4853
4854 if (cc_a->ordering != cc_b->ordering) {
4855 ret = cc_a->ordering - cc_b->ordering;
4856 } else {
4857 g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a);
4858 g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b);
4859 ret = strcmp(name_a, name_b);
4860 }
4861 return ret;
4862 }
4863
4864 static GSList *get_sorted_cpu_model_list(void)
4865 {
4866 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
4867 list = g_slist_sort(list, x86_cpu_list_compare);
4868 return list;
4869 }
4870
4871 static char *x86_cpu_class_get_model_id(X86CPUClass *xc)
4872 {
4873 Object *obj = object_new_with_class(OBJECT_CLASS(xc));
4874 char *r = object_property_get_str(obj, "model-id", &error_abort);
4875 object_unref(obj);
4876 return r;
4877 }
4878
4879 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc)
4880 {
4881 X86CPUVersion version;
4882
4883 if (!cc->model || !cc->model->is_alias) {
4884 return NULL;
4885 }
4886 version = x86_cpu_model_resolve_version(cc->model);
4887 if (version <= 0) {
4888 return NULL;
4889 }
4890 return x86_cpu_versioned_model_name(cc->model->cpudef, version);
4891 }
4892
4893 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
4894 {
4895 ObjectClass *oc = data;
4896 X86CPUClass *cc = X86_CPU_CLASS(oc);
4897 g_autofree char *name = x86_cpu_class_get_model_name(cc);
4898 g_autofree char *desc = g_strdup(cc->model_description);
4899 g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc);
4900 g_autofree char *model_id = x86_cpu_class_get_model_id(cc);
4901
4902 if (!desc && alias_of) {
4903 if (cc->model && cc->model->version == CPU_VERSION_AUTO) {
4904 desc = g_strdup("(alias configured by machine type)");
4905 } else {
4906 desc = g_strdup_printf("(alias of %s)", alias_of);
4907 }
4908 }
4909 if (!desc && cc->model && cc->model->note) {
4910 desc = g_strdup_printf("%s [%s]", model_id, cc->model->note);
4911 }
4912 if (!desc) {
4913 desc = g_strdup_printf("%s", model_id);
4914 }
4915
4916 qemu_printf("x86 %-20s %-58s\n", name, desc);
4917 }
4918
4919 /* list available CPU models and flags */
4920 void x86_cpu_list(void)
4921 {
4922 int i, j;
4923 GSList *list;
4924 GList *names = NULL;
4925
4926 qemu_printf("Available CPUs:\n");
4927 list = get_sorted_cpu_model_list();
4928 g_slist_foreach(list, x86_cpu_list_entry, NULL);
4929 g_slist_free(list);
4930
4931 names = NULL;
4932 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
4933 FeatureWordInfo *fw = &feature_word_info[i];
4934 for (j = 0; j < 64; j++) {
4935 if (fw->feat_names[j]) {
4936 names = g_list_append(names, (gpointer)fw->feat_names[j]);
4937 }
4938 }
4939 }
4940
4941 names = g_list_sort(names, (GCompareFunc)strcmp);
4942
4943 qemu_printf("\nRecognized CPUID flags:\n");
4944 listflags(names);
4945 qemu_printf("\n");
4946 g_list_free(names);
4947 }
4948
4949 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
4950 {
4951 ObjectClass *oc = data;
4952 X86CPUClass *cc = X86_CPU_CLASS(oc);
4953 CpuDefinitionInfoList **cpu_list = user_data;
4954 CpuDefinitionInfoList *entry;
4955 CpuDefinitionInfo *info;
4956
4957 info = g_malloc0(sizeof(*info));
4958 info->name = x86_cpu_class_get_model_name(cc);
4959 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
4960 info->has_unavailable_features = true;
4961 info->q_typename = g_strdup(object_class_get_name(oc));
4962 info->migration_safe = cc->migration_safe;
4963 info->has_migration_safe = true;
4964 info->q_static = cc->static_model;
4965 /*
4966 * Old machine types won't report aliases, so that alias translation
4967 * doesn't break compatibility with previous QEMU versions.
4968 */
4969 if (default_cpu_version != CPU_VERSION_LEGACY) {
4970 info->alias_of = x86_cpu_class_get_alias_of(cc);
4971 info->has_alias_of = !!info->alias_of;
4972 }
4973
4974 entry = g_malloc0(sizeof(*entry));
4975 entry->value = info;
4976 entry->next = *cpu_list;
4977 *cpu_list = entry;
4978 }
4979
4980 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
4981 {
4982 CpuDefinitionInfoList *cpu_list = NULL;
4983 GSList *list = get_sorted_cpu_model_list();
4984 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
4985 g_slist_free(list);
4986 return cpu_list;
4987 }
4988
4989 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
4990 bool migratable_only)
4991 {
4992 FeatureWordInfo *wi = &feature_word_info[w];
4993 uint64_t r = 0;
4994
4995 if (kvm_enabled()) {
4996 switch (wi->type) {
4997 case CPUID_FEATURE_WORD:
4998 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
4999 wi->cpuid.ecx,
5000 wi->cpuid.reg);
5001 break;
5002 case MSR_FEATURE_WORD:
5003 r = kvm_arch_get_supported_msr_feature(kvm_state,
5004 wi->msr.index);
5005 break;
5006 }
5007 } else if (hvf_enabled()) {
5008 if (wi->type != CPUID_FEATURE_WORD) {
5009 return 0;
5010 }
5011 r = hvf_get_supported_cpuid(wi->cpuid.eax,
5012 wi->cpuid.ecx,
5013 wi->cpuid.reg);
5014 } else if (tcg_enabled()) {
5015 r = wi->tcg_features;
5016 } else {
5017 return ~0;
5018 }
5019 if (migratable_only) {
5020 r &= x86_cpu_get_migratable_flags(w);
5021 }
5022 return r;
5023 }
5024
5025 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
5026 {
5027 PropValue *pv;
5028 for (pv = props; pv->prop; pv++) {
5029 if (!pv->value) {
5030 continue;
5031 }
5032 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
5033 &error_abort);
5034 }
5035 }
5036
5037 /* Apply properties for the CPU model version specified in model */
5038 static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
5039 {
5040 const X86CPUVersionDefinition *vdef;
5041 X86CPUVersion version = x86_cpu_model_resolve_version(model);
5042
5043 if (version == CPU_VERSION_LEGACY) {
5044 return;
5045 }
5046
5047 for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) {
5048 PropValue *p;
5049
5050 for (p = vdef->props; p && p->prop; p++) {
5051 object_property_parse(OBJECT(cpu), p->value, p->prop,
5052 &error_abort);
5053 }
5054
5055 if (vdef->version == version) {
5056 break;
5057 }
5058 }
5059
5060 /*
5061 * If we reached the end of the list, the version number was invalid.
5062 */
5063 assert(vdef->version == version);
5064 }
5065
5066 /* Load data from X86CPUDefinition into an X86CPU object
5067 */
5068 static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model, Error **errp)
5069 {
5070 X86CPUDefinition *def = model->cpudef;
5071 CPUX86State *env = &cpu->env;
5072 const char *vendor;
5073 char host_vendor[CPUID_VENDOR_SZ + 1];
5074 FeatureWord w;
5075
5076 /* NOTE: any property set by this function should be returned by
5077 * x86_cpu_static_props(), so static expansion of
5078 * query-cpu-model-expansion is always complete.
5079 */
5080
5081 /* CPU models only set _minimum_ values for level/xlevel: */
5082 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
5083 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
5084
5085 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
5086 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
5087 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
5088 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
5089 for (w = 0; w < FEATURE_WORDS; w++) {
5090 env->features[w] = def->features[w];
5091 }
5092
5093 /* legacy-cache defaults to 'off' if CPU model provides cache info */
5094 cpu->legacy_cache = !def->cache_info;
5095
5096 /* Special cases not set in the X86CPUDefinition structs: */
5097 /* TODO: in-kernel irqchip for hvf */
5098 if (kvm_enabled()) {
5099 if (!kvm_irqchip_in_kernel()) {
5100 x86_cpu_change_kvm_default("x2apic", "off");
5101 }
5102
5103 x86_cpu_apply_props(cpu, kvm_default_props);
5104 } else if (tcg_enabled()) {
5105 x86_cpu_apply_props(cpu, tcg_default_props);
5106 }
5107
5108 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
5109
5110 /* sysenter isn't supported in compatibility mode on AMD,
5111 * syscall isn't supported in compatibility mode on Intel.
5112 * Normally we advertise the actual CPU vendor, but you can
5113 * override this using the 'vendor' property if you want to use
5114 * KVM's sysenter/syscall emulation in compatibility mode and
5115 * when doing cross-vendor migration.
5116 */
5117 vendor = def->vendor;
5118 if (accel_uses_host_cpuid()) {
5119 uint32_t ebx = 0, ecx = 0, edx = 0;
5120 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
5121 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
5122 vendor = host_vendor;
5123 }
5124
5125 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
5126
5127 x86_cpu_apply_version_props(cpu, model);
5128 }
5129
5130 #ifndef CONFIG_USER_ONLY
5131 /* Return a QDict containing keys for all properties that can be included
5132 * in static expansion of CPU models. All properties set by x86_cpu_load_model()
5133 * must be included in the dictionary.
5134 */
5135 static QDict *x86_cpu_static_props(void)
5136 {
5137 FeatureWord w;
5138 int i;
5139 static const char *props[] = {
5140 "min-level",
5141 "min-xlevel",
5142 "family",
5143 "model",
5144 "stepping",
5145 "model-id",
5146 "vendor",
5147 "lmce",
5148 NULL,
5149 };
5150 static QDict *d;
5151
5152 if (d) {
5153 return d;
5154 }
5155
5156 d = qdict_new();
5157 for (i = 0; props[i]; i++) {
5158 qdict_put_null(d, props[i]);
5159 }
5160
5161 for (w = 0; w < FEATURE_WORDS; w++) {
5162 FeatureWordInfo *fi = &feature_word_info[w];
5163 int bit;
5164 for (bit = 0; bit < 64; bit++) {
5165 if (!fi->feat_names[bit]) {
5166 continue;
5167 }
5168 qdict_put_null(d, fi->feat_names[bit]);
5169 }
5170 }
5171
5172 return d;
5173 }
5174
5175 /* Add an entry to @props dict, with the value for property. */
5176 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
5177 {
5178 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
5179 &error_abort);
5180
5181 qdict_put_obj(props, prop, value);
5182 }
5183
5184 /* Convert CPU model data from X86CPU object to a property dictionary
5185 * that can recreate exactly the same CPU model.
5186 */
5187 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
5188 {
5189 QDict *sprops = x86_cpu_static_props();
5190 const QDictEntry *e;
5191
5192 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
5193 const char *prop = qdict_entry_key(e);
5194 x86_cpu_expand_prop(cpu, props, prop);
5195 }
5196 }
5197
5198 /* Convert CPU model data from X86CPU object to a property dictionary
5199 * that can recreate exactly the same CPU model, including every
5200 * writeable QOM property.
5201 */
5202 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
5203 {
5204 ObjectPropertyIterator iter;
5205 ObjectProperty *prop;
5206
5207 object_property_iter_init(&iter, OBJECT(cpu));
5208 while ((prop = object_property_iter_next(&iter))) {
5209 /* skip read-only or write-only properties */
5210 if (!prop->get || !prop->set) {
5211 continue;
5212 }
5213
5214 /* "hotplugged" is the only property that is configurable
5215 * on the command-line but will be set differently on CPUs
5216 * created using "-cpu ... -smp ..." and by CPUs created
5217 * on the fly by x86_cpu_from_model() for querying. Skip it.
5218 */
5219 if (!strcmp(prop->name, "hotplugged")) {
5220 continue;
5221 }
5222 x86_cpu_expand_prop(cpu, props, prop->name);
5223 }
5224 }
5225
5226 static void object_apply_props(Object *obj, QDict *props, Error **errp)
5227 {
5228 const QDictEntry *prop;
5229 Error *err = NULL;
5230
5231 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
5232 object_property_set_qobject(obj, qdict_entry_value(prop),
5233 qdict_entry_key(prop), &err);
5234 if (err) {
5235 break;
5236 }
5237 }
5238
5239 error_propagate(errp, err);
5240 }
5241
5242 /* Create X86CPU object according to model+props specification */
5243 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
5244 {
5245 X86CPU *xc = NULL;
5246 X86CPUClass *xcc;
5247 Error *err = NULL;
5248
5249 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
5250 if (xcc == NULL) {
5251 error_setg(&err, "CPU model '%s' not found", model);
5252 goto out;
5253 }
5254
5255 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
5256 if (props) {
5257 object_apply_props(OBJECT(xc), props, &err);
5258 if (err) {
5259 goto out;
5260 }
5261 }
5262
5263 x86_cpu_expand_features(xc, &err);
5264 if (err) {
5265 goto out;
5266 }
5267
5268 out:
5269 if (err) {
5270 error_propagate(errp, err);
5271 object_unref(OBJECT(xc));
5272 xc = NULL;
5273 }
5274 return xc;
5275 }
5276
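/*
 * Illustrative QMP usage (a sketch, not part of this file):
 *   { "execute": "query-cpu-model-expansion",
 *     "arguments": { "type": "static",
 *                    "model": { "name": "Haswell", "props": { "vmx": false } } } }
 * Static expansion reports the result relative to the "base" model plus a
 * full static-property dictionary; full expansion keeps the original model
 * name and adds every writable property.
 */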
5277 CpuModelExpansionInfo *
5278 qmp_query_cpu_model_expansion(CpuModelExpansionType type,
5279 CpuModelInfo *model,
5280 Error **errp)
5281 {
5282 X86CPU *xc = NULL;
5283 Error *err = NULL;
5284 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
5285 QDict *props = NULL;
5286 const char *base_name;
5287
5288 xc = x86_cpu_from_model(model->name,
5289 model->has_props ?
5290 qobject_to(QDict, model->props) :
5291 NULL, &err);
5292 if (err) {
5293 goto out;
5294 }
5295
5296 props = qdict_new();
5297 ret->model = g_new0(CpuModelInfo, 1);
5298 ret->model->props = QOBJECT(props);
5299 ret->model->has_props = true;
5300
5301 switch (type) {
5302 case CPU_MODEL_EXPANSION_TYPE_STATIC:
5303 /* Static expansion will be based on "base" only */
5304 base_name = "base";
5305 x86_cpu_to_dict(xc, props);
5306 break;
5307 case CPU_MODEL_EXPANSION_TYPE_FULL:
5308 /* As we don't return every single property, full expansion needs
5309 * to keep the original model name+props, and add extra
5310 * properties on top of that.
5311 */
5312 base_name = model->name;
5313 x86_cpu_to_dict_full(xc, props);
5314 break;
5315 default:
5316 error_setg(&err, "Unsupported expansion type");
5317 goto out;
5318 }
5319
5320 x86_cpu_to_dict(xc, props);
5321
5322 ret->model->name = g_strdup(base_name);
5323
5324 out:
5325 object_unref(OBJECT(xc));
5326 if (err) {
5327 error_propagate(errp, err);
5328 qapi_free_CpuModelExpansionInfo(ret);
5329 ret = NULL;
5330 }
5331 return ret;
5332 }
5333 #endif /* !CONFIG_USER_ONLY */
5334
5335 static gchar *x86_gdb_arch_name(CPUState *cs)
5336 {
5337 #ifdef TARGET_X86_64
5338 return g_strdup("i386:x86-64");
5339 #else
5340 return g_strdup("i386");
5341 #endif
5342 }
5343
5344 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
5345 {
5346 X86CPUModel *model = data;
5347 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5348
5349 xcc->model = model;
5350 xcc->migration_safe = true;
5351 }
5352
5353 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model)
5354 {
5355 g_autofree char *typename = x86_cpu_type_name(name);
5356 TypeInfo ti = {
5357 .name = typename,
5358 .parent = TYPE_X86_CPU,
5359 .class_init = x86_cpu_cpudef_class_init,
5360 .class_data = model,
5361 };
5362
5363 type_register(&ti);
5364 }
5365
5366 static void x86_register_cpudef_types(X86CPUDefinition *def)
5367 {
5368 X86CPUModel *m;
5369 const X86CPUVersionDefinition *vdef;
5370
5371 /* AMD aliases are handled at runtime based on CPUID vendor, so
5372 * they shouldn't be set in the CPU model table.
5373 */
5374 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
5375 /* catch mistakes instead of silently truncating model_id when too long */
5376 assert(def->model_id && strlen(def->model_id) <= 48);
5377
5378 /* Unversioned model: */
5379 m = g_new0(X86CPUModel, 1);
5380 m->cpudef = def;
5381 m->version = CPU_VERSION_AUTO;
5382 m->is_alias = true;
5383 x86_register_cpu_model_type(def->name, m);
5384
5385 /* Versioned models: */
5386
5387 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
5388 X86CPUModel *m = g_new0(X86CPUModel, 1);
5389 g_autofree char *name =
5390 x86_cpu_versioned_model_name(def, vdef->version);
5391 m->cpudef = def;
5392 m->version = vdef->version;
5393 m->note = vdef->note;
5394 x86_register_cpu_model_type(name, m);
5395
5396 if (vdef->alias) {
5397 X86CPUModel *am = g_new0(X86CPUModel, 1);
5398 am->cpudef = def;
5399 am->version = vdef->version;
5400 am->is_alias = true;
5401 x86_register_cpu_model_type(vdef->alias, am);
5402 }
5403 }
5404
5405 }
5406
5407 #if !defined(CONFIG_USER_ONLY)
5408
5409 void cpu_clear_apic_feature(CPUX86State *env)
5410 {
5411 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
5412 }
5413
5414 #endif /* !CONFIG_USER_ONLY */
5415
5416 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
5417 uint32_t *eax, uint32_t *ebx,
5418 uint32_t *ecx, uint32_t *edx)
5419 {
5420 X86CPU *cpu = env_archcpu(env);
5421 CPUState *cs = env_cpu(env);
5422 uint32_t die_offset;
5423 uint32_t limit;
5424 uint32_t signature[3];
5425 X86CPUTopoInfo topo_info;
5426
5427 topo_info.nodes_per_pkg = env->nr_nodes;
5428 topo_info.dies_per_pkg = env->nr_dies;
5429 topo_info.cores_per_die = cs->nr_cores;
5430 topo_info.threads_per_core = cs->nr_threads;
5431
5432 /* Calculate & apply limits for different index ranges */
5433 if (index >= 0xC0000000) {
5434 limit = env->cpuid_xlevel2;
5435 } else if (index >= 0x80000000) {
5436 limit = env->cpuid_xlevel;
5437 } else if (index >= 0x40000000) {
5438 limit = 0x40000001;
5439 } else {
5440 limit = env->cpuid_level;
5441 }
5442
5443 if (index > limit) {
5444 /* Intel documentation states that invalid EAX input will
5445 * return the same information as EAX=cpuid_level
5446 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
5447 */
5448 index = env->cpuid_level;
5449 }
5450
5451 switch(index) {
5452 case 0:
5453 *eax = env->cpuid_level;
5454 *ebx = env->cpuid_vendor1;
5455 *edx = env->cpuid_vendor2;
5456 *ecx = env->cpuid_vendor3;
5457 break;
5458 case 1:
5459 *eax = env->cpuid_version;
5460 *ebx = (cpu->apic_id << 24) |
5461 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
5462 *ecx = env->features[FEAT_1_ECX];
5463 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
5464 *ecx |= CPUID_EXT_OSXSAVE;
5465 }
5466 *edx = env->features[FEAT_1_EDX];
5467 if (cs->nr_cores * cs->nr_threads > 1) {
5468 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
5469 *edx |= CPUID_HT;
5470 }
5471 break;
5472 case 2:
5473 /* cache info: needed for Pentium Pro compatibility */
5474 if (cpu->cache_info_passthrough) {
5475 host_cpuid(index, 0, eax, ebx, ecx, edx);
5476 break;
5477 }
5478 *eax = 1; /* Number of CPUID[EAX=2] calls required */
5479 *ebx = 0;
5480 if (!cpu->enable_l3_cache) {
5481 *ecx = 0;
5482 } else {
5483 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
5484 }
5485 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
5486 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
5487 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
5488 break;
5489 case 4:
5490 /* cache info: needed for Core compatibility */
5491 if (cpu->cache_info_passthrough) {
5492 host_cpuid(index, count, eax, ebx, ecx, edx);
5493 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
5494 *eax &= ~0xFC000000;
5495 if ((*eax & 31) && cs->nr_cores > 1) {
5496 *eax |= (cs->nr_cores - 1) << 26;
5497 }
5498 } else {
5499 *eax = 0;
5500 switch (count) {
5501 case 0: /* L1 dcache info */
5502 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
5503 1, cs->nr_cores,
5504 eax, ebx, ecx, edx);
5505 break;
5506 case 1: /* L1 icache info */
5507 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
5508 1, cs->nr_cores,
5509 eax, ebx, ecx, edx);
5510 break;
5511 case 2: /* L2 cache info */
5512 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
5513 cs->nr_threads, cs->nr_cores,
5514 eax, ebx, ecx, edx);
5515 break;
5516 case 3: /* L3 cache info */
5517 die_offset = apicid_die_offset(&topo_info);
5518 if (cpu->enable_l3_cache) {
5519 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
5520 (1 << die_offset), cs->nr_cores,
5521 eax, ebx, ecx, edx);
5522 break;
5523 }
5524 /* fall through */
5525 default: /* end of info */
5526 *eax = *ebx = *ecx = *edx = 0;
5527 break;
5528 }
5529 }
5530 break;
5531 case 5:
5532 /* MONITOR/MWAIT Leaf */
5533 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
5534 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
5535 *ecx = cpu->mwait.ecx; /* flags */
5536 *edx = cpu->mwait.edx; /* mwait substates */
5537 break;
5538 case 6:
5539 /* Thermal and Power Leaf */
5540 *eax = env->features[FEAT_6_EAX];
5541 *ebx = 0;
5542 *ecx = 0;
5543 *edx = 0;
5544 break;
5545 case 7:
5546 /* Structured Extended Feature Flags Enumeration Leaf */
5547 if (count == 0) {
5548 /* Maximum ECX value for sub-leaves */
5549 *eax = env->cpuid_level_func7;
5550 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
5551 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
5552 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
5553 *ecx |= CPUID_7_0_ECX_OSPKE;
5554 }
5555 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
5556 } else if (count == 1) {
5557 *eax = env->features[FEAT_7_1_EAX];
5558 *ebx = 0;
5559 *ecx = 0;
5560 *edx = 0;
5561 } else {
5562 *eax = 0;
5563 *ebx = 0;
5564 *ecx = 0;
5565 *edx = 0;
5566 }
5567 break;
5568 case 9:
5569 /* Direct Cache Access Information Leaf */
5570 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
5571 *ebx = 0;
5572 *ecx = 0;
5573 *edx = 0;
5574 break;
5575 case 0xA:
5576 /* Architectural Performance Monitoring Leaf */
5577 if (kvm_enabled() && cpu->enable_pmu) {
5578 KVMState *s = cs->kvm_state;
5579
5580 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
5581 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
5582 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
5583 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
5584 } else if (hvf_enabled() && cpu->enable_pmu) {
5585 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
5586 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
5587 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
5588 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
5589 } else {
5590 *eax = 0;
5591 *ebx = 0;
5592 *ecx = 0;
5593 *edx = 0;
5594 }
5595 break;
5596 case 0xB:
5597 /* Extended Topology Enumeration Leaf */
5598 if (!cpu->enable_cpuid_0xb) {
5599 *eax = *ebx = *ecx = *edx = 0;
5600 break;
5601 }
5602
5603 *ecx = count & 0xff;
5604 *edx = cpu->apic_id;
5605
5606 switch (count) {
5607 case 0:
5608 *eax = apicid_core_offset(&topo_info);
5609 *ebx = cs->nr_threads;
5610 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
5611 break;
5612 case 1:
5613 *eax = env->pkg_offset;
5614 *ebx = cs->nr_cores * cs->nr_threads;
5615 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
5616 break;
5617 default:
5618 *eax = 0;
5619 *ebx = 0;
5620 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
5621 }
5622
5623 assert(!(*eax & ~0x1f));
5624 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
5625 break;
5626 case 0x1F:
5627 /* V2 Extended Topology Enumeration Leaf */
5628 if (env->nr_dies < 2) {
5629 *eax = *ebx = *ecx = *edx = 0;
5630 break;
5631 }
5632
5633 *ecx = count & 0xff;
5634 *edx = cpu->apic_id;
5635 switch (count) {
5636 case 0:
5637 *eax = apicid_core_offset(&topo_info);
5638 *ebx = cs->nr_threads;
5639 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
5640 break;
5641 case 1:
5642 *eax = apicid_die_offset(&topo_info);
5643 *ebx = cs->nr_cores * cs->nr_threads;
5644 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
5645 break;
5646 case 2:
5647 *eax = env->pkg_offset;
5648 *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
5649 *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
5650 break;
5651 default:
5652 *eax = 0;
5653 *ebx = 0;
5654 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
5655 }
5656 assert(!(*eax & ~0x1f));
5657 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
5658 break;
5659 case 0xD: {
5660 /* Processor Extended State */
5661 *eax = 0;
5662 *ebx = 0;
5663 *ecx = 0;
5664 *edx = 0;
5665 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
5666 break;
5667 }
5668
5669 if (count == 0) {
5670 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
5671 *eax = env->features[FEAT_XSAVE_COMP_LO];
5672 *edx = env->features[FEAT_XSAVE_COMP_HI];
5673 /*
5674 * The initial value of xcr0 and ebx == 0. On a host without KVM
5675 * commit 412a3c41 (e.g., CentOS 6), ebx always stays 0 even after
5676 * the guest updates xcr0, which crashes some legacy guests
5677 * (e.g., CentOS 6). So set ebx == ecx to work around it.
5678 */
5679 *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0);
5680 } else if (count == 1) {
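/* CPUID[0xD,1].EAX: XSAVEOPT, XSAVEC, XGETBV1, XSAVES feature bits */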
5681 *eax = env->features[FEAT_XSAVE];
5682 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
5683 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
5684 const ExtSaveArea *esa = &x86_ext_save_areas[count];
5685 *eax = esa->size;
5686 *ebx = esa->offset;
5687 }
5688 }
5689 break;
5690 }
5691 case 0x14: {
5692 /* Intel Processor Trace Enumeration */
5693 *eax = 0;
5694 *ebx = 0;
5695 *ecx = 0;
5696 *edx = 0;
5697 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
5698 !kvm_enabled()) {
5699 break;
5700 }
5701
5702 if (count == 0) {
5703 *eax = INTEL_PT_MAX_SUBLEAF;
5704 *ebx = INTEL_PT_MINIMAL_EBX;
5705 *ecx = INTEL_PT_MINIMAL_ECX;
5706 } else if (count == 1) {
5707 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
5708 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
5709 }
5710 break;
5711 }
5712 case 0x40000000:
5713 /*
5714 * CPUID code in kvm_arch_init_vcpu() ignores stuff
5715 * set here, but we restrict this to TCG nonetheless.
5716 */
5717 if (tcg_enabled() && cpu->expose_tcg) {
5718 memcpy(signature, "TCGTCGTCGTCG", 12);
5719 *eax = 0x40000001;
5720 *ebx = signature[0];
5721 *ecx = signature[1];
5722 *edx = signature[2];
5723 } else {
5724 *eax = 0;
5725 *ebx = 0;
5726 *ecx = 0;
5727 *edx = 0;
5728 }
5729 break;
5730 case 0x40000001:
5731 *eax = 0;
5732 *ebx = 0;
5733 *ecx = 0;
5734 *edx = 0;
5735 break;
5736 case 0x80000000:
5737 *eax = env->cpuid_xlevel;
5738 *ebx = env->cpuid_vendor1;
5739 *edx = env->cpuid_vendor2;
5740 *ecx = env->cpuid_vendor3;
5741 break;
5742 case 0x80000001:
5743 *eax = env->cpuid_version;
5744 *ebx = 0;
5745 *ecx = env->features[FEAT_8000_0001_ECX];
5746 *edx = env->features[FEAT_8000_0001_EDX];
5747
5748 /* The Linux kernel checks for the CMPLegacy bit and
5749 * discards multiple thread information if it is set.
5750 * So don't set it here for Intel to make Linux guests happy.
5751 */
5752 if (cs->nr_cores * cs->nr_threads > 1) {
5753 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
5754 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
5755 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
5756 *ecx |= 1 << 1; /* CmpLegacy bit */
5757 }
5758 }
5759 break;
5760 case 0x80000002:
5761 case 0x80000003:
5762 case 0x80000004:
5763 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
5764 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
5765 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
5766 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
5767 break;
5768 case 0x80000005:
5769 /* cache info (L1 cache) */
5770 if (cpu->cache_info_passthrough) {
5771 host_cpuid(index, 0, eax, ebx, ecx, edx);
5772 break;
5773 }
5774 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
5775 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
5776 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
5777 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
5778 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
5779 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
5780 break;
5781 case 0x80000006:
5782 /* cache info (L2 cache) */
5783 if (cpu->cache_info_passthrough) {
5784 host_cpuid(index, 0, eax, ebx, ecx, edx);
5785 break;
5786 }
5787 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
5788 (L2_DTLB_2M_ENTRIES << 16) | \
5789 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
5790 (L2_ITLB_2M_ENTRIES);
5791 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
5792 (L2_DTLB_4K_ENTRIES << 16) | \
5793 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
5794 (L2_ITLB_4K_ENTRIES);
5795 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
5796 cpu->enable_l3_cache ?
5797 env->cache_info_amd.l3_cache : NULL,
5798 ecx, edx);
5799 break;
5800 case 0x80000007:
5801 *eax = 0;
5802 *ebx = 0;
5803 *ecx = 0;
5804 *edx = env->features[FEAT_8000_0007_EDX];
5805 break;
5806 case 0x80000008:
5807 /* virtual & phys address size in low 2 bytes. */
5808 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
5809 /* 64 bit processor */
5810 *eax = cpu->phys_bits; /* configurable physical bits */
5811 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
5812 *eax |= 0x00003900; /* 57 bits virtual */
5813 } else {
5814 *eax |= 0x00003000; /* 48 bits virtual */
5815 }
5816 } else {
5817 *eax = cpu->phys_bits;
5818 }
5819 *ebx = env->features[FEAT_8000_0008_EBX];
5820 *ecx = 0;
5821 *edx = 0;
5822 if (cs->nr_cores * cs->nr_threads > 1) {
5823 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
5824 }
5825 break;
5826 case 0x8000000A:
5827 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
5828 *eax = 0x00000001; /* SVM Revision */
5829 *ebx = 0x00000010; /* nr of ASIDs */
5830 *ecx = 0;
5831 *edx = env->features[FEAT_SVM]; /* optional features */
5832 } else {
5833 *eax = 0;
5834 *ebx = 0;
5835 *ecx = 0;
5836 *edx = 0;
5837 }
5838 break;
5839 case 0x8000001D:
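/* AMD cache topology information (analogous to Intel leaf 4) */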
5840 *eax = 0;
5841 if (cpu->cache_info_passthrough) {
5842 host_cpuid(index, count, eax, ebx, ecx, edx);
5843 break;
5844 }
5845 switch (count) {
5846 case 0: /* L1 dcache info */
5847 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache,
5848 &topo_info, eax, ebx, ecx, edx);
5849 break;
5850 case 1: /* L1 icache info */
5851 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache,
5852 &topo_info, eax, ebx, ecx, edx);
5853 break;
5854 case 2: /* L2 cache info */
5855 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache,
5856 &topo_info, eax, ebx, ecx, edx);
5857 break;
5858 case 3: /* L3 cache info */
5859 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache,
5860 &topo_info, eax, ebx, ecx, edx);
5861 break;
5862 default: /* end of info */
5863 *eax = *ebx = *ecx = *edx = 0;
5864 break;
5865 }
5866 break;
5867 case 0x8000001E:
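/* AMD processor topology extensions (extended APIC ID, core and node IDs) */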
5868 assert(cpu->core_id <= 255);
5869 encode_topo_cpuid8000001e(&topo_info, cpu, eax, ebx, ecx, edx);
5870 break;
5871 case 0xC0000000:
5872 *eax = env->cpuid_xlevel2;
5873 *ebx = 0;
5874 *ecx = 0;
5875 *edx = 0;
5876 break;
5877 case 0xC0000001:
5878 /* Support for VIA CPU's CPUID instruction */
5879 *eax = env->cpuid_version;
5880 *ebx = 0;
5881 *ecx = 0;
5882 *edx = env->features[FEAT_C000_0001_EDX];
5883 break;
5884 case 0xC0000002:
5885 case 0xC0000003:
5886 case 0xC0000004:
5887 /* Reserved for future use, currently filled with zeros */
5888 *eax = 0;
5889 *ebx = 0;
5890 *ecx = 0;
5891 *edx = 0;
5892 break;
5893 case 0x8000001F:
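/* AMD memory encryption (SEV) capabilities */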
5894 *eax = sev_enabled() ? 0x2 : 0;
5895 *ebx = sev_get_cbit_position();
5896 *ebx |= sev_get_reduced_phys_bits() << 6;
5897 *ecx = 0;
5898 *edx = 0;
5899 break;
5900 default:
5901 /* reserved values: zero */
5902 *eax = 0;
5903 *ebx = 0;
5904 *ecx = 0;
5905 *edx = 0;
5906 break;
5907 }
5908 }
5909
5910 static void x86_cpu_reset(DeviceState *dev)
5911 {
5912 CPUState *s = CPU(dev);
5913 X86CPU *cpu = X86_CPU(s);
5914 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
5915 CPUX86State *env = &cpu->env;
5916 target_ulong cr4;
5917 uint64_t xcr0;
5918 int i;
5919
5920 xcc->parent_reset(dev);
5921
5922 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
5923
5924 env->old_exception = -1;
5925
5926 /* init to reset state */
5927
5928 env->hflags2 |= HF2_GIF_MASK;
5929
5930 cpu_x86_update_cr0(env, 0x60000010);
5931 env->a20_mask = ~0x0;
5932 env->smbase = 0x30000;
5933 env->msr_smi_count = 0;
5934
5935 env->idt.limit = 0xffff;
5936 env->gdt.limit = 0xffff;
5937 env->ldt.limit = 0xffff;
5938 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
5939 env->tr.limit = 0xffff;
5940 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
5941
5942 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
5943 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
5944 DESC_R_MASK | DESC_A_MASK);
5945 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
5946 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5947 DESC_A_MASK);
5948 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
5949 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5950 DESC_A_MASK);
5951 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
5952 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5953 DESC_A_MASK);
5954 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
5955 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5956 DESC_A_MASK);
5957 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
5958 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5959 DESC_A_MASK);
5960
5961 env->eip = 0xfff0;
5962 env->regs[R_EDX] = env->cpuid_version;
5963
5964 env->eflags = 0x2;
5965
5966 /* FPU init */
5967 for (i = 0; i < 8; i++) {
5968 env->fptags[i] = 1;
5969 }
5970 cpu_set_fpuc(env, 0x37f);
5971
5972 env->mxcsr = 0x1f80;
5973 /* All units are in INIT state. */
5974 env->xstate_bv = 0;
5975
5976 env->pat = 0x0007040600070406ULL;
5977 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
5978 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
5979 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
5980 }
5981
5982 memset(env->dr, 0, sizeof(env->dr));
5983 env->dr[6] = DR6_FIXED_1;
5984 env->dr[7] = DR7_FIXED_1;
5985 cpu_breakpoint_remove_all(s, BP_CPU);
5986 cpu_watchpoint_remove_all(s, BP_CPU);
5987
5988 cr4 = 0;
5989 xcr0 = XSTATE_FP_MASK;
5990
5991 #ifdef CONFIG_USER_ONLY
5992 /* Enable all the features for user-mode. */
5993 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
5994 xcr0 |= XSTATE_SSE_MASK;
5995 }
5996 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
5997 const ExtSaveArea *esa = &x86_ext_save_areas[i];
5998 if (env->features[esa->feature] & esa->bits) {
5999 xcr0 |= 1ull << i;
6000 }
6001 }
6002
6003 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
6004 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
6005 }
6006 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
6007 cr4 |= CR4_FSGSBASE_MASK;
6008 }
6009 #endif
6010
6011 env->xcr0 = xcr0;
6012 cpu_x86_update_cr4(env, cr4);
6013
6014 /*
6015 * SDM 11.11.5 requires:
6016 * - IA32_MTRR_DEF_TYPE MSR.E = 0
6017 * - IA32_MTRR_PHYSMASKn.V = 0
6018 * All other bits are undefined. For simplification, zero it all.
6019 */
6020 env->mtrr_deftype = 0;
6021 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
6022 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
6023
6024 env->interrupt_injected = -1;
6025 env->exception_nr = -1;
6026 env->exception_pending = 0;
6027 env->exception_injected = 0;
6028 env->exception_has_payload = false;
6029 env->exception_payload = 0;
6030 env->nmi_injected = false;
6031 #if !defined(CONFIG_USER_ONLY)
6032 /* We hard-wire the BSP to the first CPU. */
6033 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
6034
6035 s->halted = !cpu_is_bsp(cpu);
6036
6037 if (kvm_enabled()) {
6038 kvm_arch_reset_vcpu(cpu);
6039 }
6040 else if (hvf_enabled()) {
6041 hvf_reset_vcpu(s);
6042 }
6043 #endif
6044 }
6045
6046 #ifndef CONFIG_USER_ONLY
6047 bool cpu_is_bsp(X86CPU *cpu)
6048 {
6049 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
6050 }
6051
6052 /* TODO: remove me when reset over the QOM tree is implemented */
6053 static void x86_cpu_machine_reset_cb(void *opaque)
6054 {
6055 X86CPU *cpu = opaque;
6056 cpu_reset(CPU(cpu));
6057 }
6058 #endif
6059
6060 static void mce_init(X86CPU *cpu)
6061 {
6062 CPUX86State *cenv = &cpu->env;
6063 unsigned int bank;
6064
6065 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
6066 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
6067 (CPUID_MCE | CPUID_MCA)) {
6068 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
6069 (cpu->enable_lmce ? MCG_LMCE_P : 0);
6070 cenv->mcg_ctl = ~(uint64_t)0;
6071 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
6072 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
6073 }
6074 }
6075 }
6076
6077 #ifndef CONFIG_USER_ONLY
6078 APICCommonClass *apic_get_class(void)
6079 {
6080 const char *apic_type = "apic";
6081
6082 /* TODO: in-kernel irqchip for hvf */
6083 if (kvm_apic_in_kernel()) {
6084 apic_type = "kvm-apic";
6085 } else if (xen_enabled()) {
6086 apic_type = "xen-apic";
6087 }
6088
6089 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
6090 }
6091
6092 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
6093 {
6094 APICCommonState *apic;
6095 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
6096
6097 cpu->apic_state = DEVICE(object_new_with_class(apic_class));
6098
6099 object_property_add_child(OBJECT(cpu), "lapic",
6100 OBJECT(cpu->apic_state), &error_abort);
6101 object_unref(OBJECT(cpu->apic_state));
6102
6103 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
6104 /* TODO: convert to link<> */
6105 apic = APIC_COMMON(cpu->apic_state);
6106 apic->cpu = cpu;
6107 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
6108 }
6109
6110 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
6111 {
6112 APICCommonState *apic;
6113 static bool apic_mmio_map_once;
6114
6115 if (cpu->apic_state == NULL) {
6116 return;
6117 }
6118 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
6119 errp);
6120
6121 /* Map APIC MMIO area */
6122 apic = APIC_COMMON(cpu->apic_state);
6123 if (!apic_mmio_map_once) {
6124 memory_region_add_subregion_overlap(get_system_memory(),
6125 apic->apicbase &
6126 MSR_IA32_APICBASE_BASE,
6127 &apic->io_memory,
6128 0x1000);
6129 apic_mmio_map_once = true;
6130 }
6131 }
6132
6133 static void x86_cpu_machine_done(Notifier *n, void *unused)
6134 {
6135 X86CPU *cpu = container_of(n, X86CPU, machine_done);
6136 MemoryRegion *smram =
6137 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
6138
6139 if (smram) {
6140 cpu->smram = g_new(MemoryRegion, 1);
6141 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
6142 smram, 0, 1ull << 32);
6143 memory_region_set_enabled(cpu->smram, true);
6144 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
6145 }
6146 }
6147 #else
6148 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
6149 {
6150 }
6151 #endif
6152
6153 /* Note: Only safe for use on x86(-64) hosts */
6154 static uint32_t x86_host_phys_bits(void)
6155 {
6156 uint32_t eax;
6157 uint32_t host_phys_bits;
6158
6159 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
6160 if (eax >= 0x80000008) {
6161 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
6162 /* Note: According to AMD doc 25481 rev 2.34 they have a field
6163 * at 23:16 that can specify the maximum number of physical address bits for
6164 * the guest that can override this value; but I've not seen
6165 * anything with that set.
6166 */
6167 host_phys_bits = eax & 0xff;
6168 } else {
6169 /* It's an odd 64-bit machine that doesn't have the leaf for
6170 * physical address bits; fall back to 36, which matches most
6171 * older Intel CPUs.
6172 */
6173 host_phys_bits = 36;
6174 }
6175
6176 return host_phys_bits;
6177 }
6178
6179 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
6180 {
6181 if (*min < value) {
6182 *min = value;
6183 }
6184 }
6185
6186 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
6187 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
6188 {
6189 CPUX86State *env = &cpu->env;
6190 FeatureWordInfo *fi = &feature_word_info[w];
6191 uint32_t eax = fi->cpuid.eax;
6192 uint32_t region = eax & 0xF0000000;
6193
6194 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
6195 if (!env->features[w]) {
6196 return;
6197 }
6198
6199 switch (region) {
6200 case 0x00000000:
6201 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
6202 break;
6203 case 0x80000000:
6204 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
6205 break;
6206 case 0xC0000000:
6207 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
6208 break;
6209 }
6210
6211 if (eax == 7) {
6212 x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7,
6213 fi->cpuid.ecx);
6214 }
6215 }
6216
6217 /* Calculate XSAVE components based on the configured CPU feature flags */
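/*
 * The resulting mask is what guests later see in CPUID[0xD].EAX:EDX
 * (FEAT_XSAVE_COMP_LO/HI): bit 0 = x87 state, bit 1 = SSE, bit 2 = AVX,
 * with the remaining bits taken from x86_ext_save_areas[].
 */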
6218 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
6219 {
6220 CPUX86State *env = &cpu->env;
6221 int i;
6222 uint64_t mask;
6223
6224 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
6225 return;
6226 }
6227
6228 mask = 0;
6229 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
6230 const ExtSaveArea *esa = &x86_ext_save_areas[i];
6231 if (env->features[esa->feature] & esa->bits) {
6232 mask |= (1ULL << i);
6233 }
6234 }
6235
6236 env->features[FEAT_XSAVE_COMP_LO] = mask;
6237 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
6238 }
6239
6240 /***** Steps involved in loading and filtering CPUID data
6241 *
6242 * When initializing and realizing a CPU object, the steps
6243 * involved in setting up CPUID data are:
6244 *
6245 * 1) Loading CPU model definition (X86CPUDefinition). This is
6246 * implemented by x86_cpu_load_model() and should be completely
6247 * transparent, as it is done automatically by instance_init.
6248 * No code should need to look at X86CPUDefinition structs
6249 * outside instance_init.
6250 *
6251 * 2) CPU expansion. This is done by realize before CPUID
6252 * filtering, and will make sure host/accelerator data is
6253 * loaded for CPU models that depend on host capabilities
6254 * (e.g. "host"). Done by x86_cpu_expand_features().
6255 *
6256 * 3) CPUID filtering. This initializes extra data related to
6257 * CPUID, and checks if the host supports all capabilities
6258 * required by the CPU. Runnability of a CPU model is
6259 * determined at this step. Done by x86_cpu_filter_features().
6260 *
6261 * Some operations don't require all steps to be performed.
6262 * More precisely:
6263 *
6264 * - CPU instance creation (instance_init) will run only CPU
6265 * model loading. CPU expansion can't run at instance_init-time
6266 * because host/accelerator data may not be available yet.
6267 * - CPU realization will perform both CPU model expansion and CPUID
6268 * filtering, and return an error in case one of them fails.
6269 * - query-cpu-definitions needs to run all 3 steps. It needs
6270 * to run CPUID filtering, as the 'unavailable-features'
6271 * field is set based on the filtering results.
6272 * - The query-cpu-model-expansion QMP command only needs to run
6273 * CPU model loading and CPU expansion. It should not filter
6274 * any CPUID data based on host capabilities.
6275 */
6276
6277 /* Expand CPU configuration data, based on configured features
6278 * and host/accelerator capabilities when appropriate.
6279 */
6280 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
6281 {
6282 CPUX86State *env = &cpu->env;
6283 FeatureWord w;
6284 int i;
6285 GList *l;
6286 Error *local_err = NULL;
6287
6288 for (l = plus_features; l; l = l->next) {
6289 const char *prop = l->data;
6290 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
6291 if (local_err) {
6292 goto out;
6293 }
6294 }
6295
6296 for (l = minus_features; l; l = l->next) {
6297 const char *prop = l->data;
6298 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
6299 if (local_err) {
6300 goto out;
6301 }
6302 }
6303
6304 /* TODO: Now cpu->max_features doesn't overwrite features
6305 * set using QOM properties, and we can convert
6306 * plus_features & minus_features to global properties
6307 * inside x86_cpu_parse_featurestr() too.
6308 */
6309 if (cpu->max_features) {
6310 for (w = 0; w < FEATURE_WORDS; w++) {
6311 /* Override only features that weren't set explicitly
6312 * by the user.
6313 */
6314 env->features[w] |=
6315 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
6316 ~env->user_features[w] & \
6317 ~feature_word_info[w].no_autoenable_flags;
6318 }
6319 }
6320
6321 for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
6322 FeatureDep *d = &feature_dependencies[i];
6323 if (!(env->features[d->from.index] & d->from.mask)) {
6324 uint64_t unavailable_features = env->features[d->to.index] & d->to.mask;
6325
6326 /* Not an error unless the dependent feature was added explicitly. */
6327 mark_unavailable_features(cpu, d->to.index,
6328 unavailable_features & env->user_features[d->to.index],
6329 "This feature depends on other features that were not requested");
6330
6331 env->user_features[d->to.index] |= unavailable_features;
6332 env->features[d->to.index] &= ~unavailable_features;
6333 }
6334 }
6335
6336 if (!kvm_enabled() || !cpu->expose_kvm) {
6337 env->features[FEAT_KVM] = 0;
6338 }
6339
6340 x86_cpu_enable_xsave_components(cpu);
6341
6342 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
6343 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
6344 if (cpu->full_cpuid_auto_level) {
6345 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
6346 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
6347 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
6348 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
6349 x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
6350 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
6351 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
6352 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
6353 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
6354 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
6355 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
6356 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
6357
6358 /* Intel Processor Trace requires CPUID[0x14] */
6359 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) {
6360 if (cpu->intel_pt_auto_level) {
6361 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
6362 } else if (cpu->env.cpuid_min_level < 0x14) {
6363 mark_unavailable_features(cpu, FEAT_7_0_EBX,
6364 CPUID_7_0_EBX_INTEL_PT,
6365 "Intel PT need CPUID leaf 0x14, please set by \"-cpu ...,+intel-pt,level=0x14\"");
6366 }
6367 }
6368
6369 /* CPU topology with multi-die support requires CPUID[0x1F] */
6370 if (env->nr_dies > 1) {
6371 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
6372 }
6373
6374 /* SVM requires CPUID[0x8000000A] */
6375 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
6376 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
6377 }
6378
6379 /* SEV requires CPUID[0x8000001F] */
6380 if (sev_enabled()) {
6381 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
6382 }
6383 }
6384
6385 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
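/*
 * UINT32_MAX acts as the "not set by the user" marker here: it is the
 * default of the "level", "level-func7", "xlevel" and "xlevel2" properties
 * in x86_cpu_properties below, so an explicit "-cpu ...,level=0x14" on the
 * command line bypasses this defaulting.
 */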
6386 if (env->cpuid_level_func7 == UINT32_MAX) {
6387 env->cpuid_level_func7 = env->cpuid_min_level_func7;
6388 }
6389 if (env->cpuid_level == UINT32_MAX) {
6390 env->cpuid_level = env->cpuid_min_level;
6391 }
6392 if (env->cpuid_xlevel == UINT32_MAX) {
6393 env->cpuid_xlevel = env->cpuid_min_xlevel;
6394 }
6395 if (env->cpuid_xlevel2 == UINT32_MAX) {
6396 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
6397 }
6398
6399 out:
6400 if (local_err != NULL) {
6401 error_propagate(errp, local_err);
6402 }
6403 }
6404
6405 /*
6406 * Finishes initialization of CPUID data, filtering the CPU feature
6407 * words based on host availability of each feature.
6408 *
6409 * Features unavailable on the host are cleared and recorded as filtered.
6410 */
6411 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
6412 {
6413 CPUX86State *env = &cpu->env;
6414 FeatureWord w;
6415 const char *prefix = NULL;
6416
6417 if (verbose) {
6418 prefix = accel_uses_host_cpuid()
6419 ? "host doesn't support requested feature"
6420 : "TCG doesn't support requested feature";
6421 }
6422
6423 for (w = 0; w < FEATURE_WORDS; w++) {
6424 uint64_t host_feat =
6425 x86_cpu_get_supported_feature_word(w, false);
6426 uint64_t requested_features = env->features[w];
6427 uint64_t unavailable_features = requested_features & ~host_feat;
6428 mark_unavailable_features(cpu, w, unavailable_features, prefix);
6429 }
6430
6431 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
6432 kvm_enabled()) {
6433 KVMState *s = CPU(cpu)->kvm_state;
6434 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
6435 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
6436 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
6437 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
6438 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
6439
6440 if (!eax_0 ||
6441 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
6442 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
6443 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
6444 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
6445 INTEL_PT_ADDR_RANGES_NUM) ||
6446 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
6447 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
6448 (ecx_0 & INTEL_PT_IP_LIP)) {
6449 /*
6450 * Processor Trace capabilities aren't configurable, so if the
6451 * host can't emulate the capabilities we report in
6452 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
6453 */
6454 mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix);
6455 }
6456 }
6457 }
6458
6459 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
6460 {
6461 CPUState *cs = CPU(dev);
6462 X86CPU *cpu = X86_CPU(dev);
6463 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
6464 CPUX86State *env = &cpu->env;
6465 Error *local_err = NULL;
6466 static bool ht_warned;
6467
6468 if (xcc->host_cpuid_required) {
6469 if (!accel_uses_host_cpuid()) {
6470 g_autofree char *name = x86_cpu_class_get_model_name(xcc);
6471 error_setg(&local_err, "CPU model '%s' requires KVM", name);
6472 goto out;
6473 }
6474 }
6475
6476 if (cpu->max_features && accel_uses_host_cpuid()) {
6477 if (enable_cpu_pm) {
6478 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
6479 &cpu->mwait.ecx, &cpu->mwait.edx);
6480 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
6481 }
6482 if (kvm_enabled() && cpu->ucode_rev == 0) {
6483 cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state,
6484 MSR_IA32_UCODE_REV);
6485 }
6486 }
6487
6488 if (cpu->ucode_rev == 0) {
6489 /* The default is the same as KVM's. */
6490 if (IS_AMD_CPU(env)) {
6491 cpu->ucode_rev = 0x01000065;
6492 } else {
6493 cpu->ucode_rev = 0x100000000ULL;
6494 }
6495 }
6496
6497 /* mwait extended info: needed for Core compatibility */
6498 /* We always wake on interrupt even if the host does not have the capability */
6499 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
6500
6501 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
6502 error_setg(errp, "apic-id property was not initialized properly");
6503 return;
6504 }
6505
6506 x86_cpu_expand_features(cpu, &local_err);
6507 if (local_err) {
6508 goto out;
6509 }
6510
6511 x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid);
6512
6513 if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) {
6514 error_setg(&local_err,
6515 accel_uses_host_cpuid() ?
6516 "Host doesn't support requested features" :
6517 "TCG doesn't support requested features");
6518 goto out;
6519 }
6520
6521 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
6522 * CPUID[1].EDX.
6523 */
6524 if (IS_AMD_CPU(env)) {
6525 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
6526 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
6527 & CPUID_EXT2_AMD_ALIASES);
6528 }
6529
6530 /* For 64-bit systems, decide how many physical address bits to present.
6531 * Ideally this should be the same as the host; anything other than matching
6532 * the host can cause incorrect guest behaviour.
6533 * QEMU used to pick the magic value of 40 bits, which corresponds to
6534 * consumer AMD devices but nothing else.
6535 */
6536 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
6537 if (accel_uses_host_cpuid()) {
6538 uint32_t host_phys_bits = x86_host_phys_bits();
6539 static bool warned;
6540
6541 /* Print a warning if the user set it to a value that's not the
6542 * host value.
6543 */
6544 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
6545 !warned) {
6546 warn_report("Host physical bits (%u)"
6547 " does not match phys-bits property (%u)",
6548 host_phys_bits, cpu->phys_bits);
6549 warned = true;
6550 }
6551
6552 if (cpu->host_phys_bits) {
6553 /* The user asked for us to use the host physical bits */
6554 cpu->phys_bits = host_phys_bits;
6555 if (cpu->host_phys_bits_limit &&
6556 cpu->phys_bits > cpu->host_phys_bits_limit) {
6557 cpu->phys_bits = cpu->host_phys_bits_limit;
6558 }
6559 }
6560
6561 if (cpu->phys_bits &&
6562 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
6563 cpu->phys_bits < 32)) {
6564 error_setg(errp, "phys-bits should be between 32 and %u"
6565 " (but is %u)",
6566 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
6567 return;
6568 }
6569 } else {
6570 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
6571 error_setg(errp, "TCG only supports phys-bits=%u",
6572 TCG_PHYS_ADDR_BITS);
6573 return;
6574 }
6575 }
6576 /* 0 means it was not explicitly set by the user (or by machine
6577 * compat_props or by the host code above). In this case, the default
6578 * is the value used by TCG (40).
6579 */
6580 if (cpu->phys_bits == 0) {
6581 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
6582 }
6583 } else {
6584 /* For 32-bit systems, don't use a user-set value, but keep
6585 * phys_bits consistent with what we tell the guest.
6586 */
6587 if (cpu->phys_bits != 0) {
6588 error_setg(errp, "phys-bits is not user-configurable in 32-bit mode");
6589 return;
6590 }
6591
6592 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
6593 cpu->phys_bits = 36;
6594 } else {
6595 cpu->phys_bits = 32;
6596 }
6597 }
6598
6599 /* Cache information initialization */
6600 if (!cpu->legacy_cache) {
6601 if (!xcc->model || !xcc->model->cpudef->cache_info) {
6602 g_autofree char *name = x86_cpu_class_get_model_name(xcc);
6603 error_setg(errp,
6604 "CPU model '%s' doesn't support legacy-cache=off", name);
6605 return;
6606 }
6607 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
6608 *xcc->model->cpudef->cache_info;
6609 } else {
6610 /* Build legacy cache information */
6611 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
6612 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
6613 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
6614 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
6615
6616 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
6617 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
6618 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
6619 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
6620
6621 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
6622 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
6623 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
6624 env->cache_info_amd.l3_cache = &legacy_l3_cache;
6625 }
6626
6627
6628 cpu_exec_realizefn(cs, &local_err);
6629 if (local_err != NULL) {
6630 error_propagate(errp, local_err);
6631 return;
6632 }
6633
6634 #ifndef CONFIG_USER_ONLY
6635 MachineState *ms = MACHINE(qdev_get_machine());
6636 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
6637
6638 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
6639 x86_cpu_apic_create(cpu, &local_err);
6640 if (local_err != NULL) {
6641 goto out;
6642 }
6643 }
6644 #endif
6645
6646 mce_init(cpu);
6647
6648 #ifndef CONFIG_USER_ONLY
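/*
 * Sketch of the TCG address-space layout set up below: address space 0
 * ("cpu-memory") uses plain system memory, while address space 1
 * ("cpu-smm") uses the cpu_as_root container, into which the machine-done
 * notifier can later map SMRAM above the low-priority system-memory alias.
 */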
6649 if (tcg_enabled()) {
6650 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
6651 cpu->cpu_as_root = g_new(MemoryRegion, 1);
6652
6653 /* Outer container... */
6654 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
6655 memory_region_set_enabled(cpu->cpu_as_root, true);
6656
6657 /* ... with two regions inside: normal system memory with low
6658 * priority, and...
6659 */
6660 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
6661 get_system_memory(), 0, ~0ull);
6662 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
6663 memory_region_set_enabled(cpu->cpu_as_mem, true);
6664
6665 cs->num_ases = 2;
6666 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
6667 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
6668
6669 /* ... SMRAM with higher priority, linked from /machine/smram. */
6670 cpu->machine_done.notify = x86_cpu_machine_done;
6671 qemu_add_machine_init_done_notifier(&cpu->machine_done);
6672 }
6673 #endif
6674
6675 qemu_init_vcpu(cs);
6676
6677 /*
6678 * Most Intel and certain AMD CPUs support hyperthreading; AMD CPUs
6679 * without TOPOEXT are assumed not to. Even though QEMU adjusts
6680 * CPUID_0000_0001_EBX and CPUID_8000_0008_ECX based on the -smp inputs
6681 * (sockets,cores,threads), it is still better to give users a warning.
6682 *
6683 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
6684 * cs->nr_threads hasn't been populated yet and the check is incorrect.
6685 */
6686 if (IS_AMD_CPU(env) &&
6687 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
6688 cs->nr_threads > 1 && !ht_warned) {
6689 warn_report("This family of AMD CPU doesn't support "
6690 "hyperthreading (%d)",
6691 cs->nr_threads);
6692 error_printf("Please configure -smp options properly"
6693 " or try enabling the topoext feature.\n");
6694 ht_warned = true;
6695 }
6696
6697 x86_cpu_apic_realize(cpu, &local_err);
6698 if (local_err != NULL) {
6699 goto out;
6700 }
6701 cpu_reset(cs);
6702
6703 xcc->parent_realize(dev, &local_err);
6704
6705 out:
6706 if (local_err != NULL) {
6707 error_propagate(errp, local_err);
6708 return;
6709 }
6710 }
6711
6712 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
6713 {
6714 X86CPU *cpu = X86_CPU(dev);
6715 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
6716 Error *local_err = NULL;
6717
6718 #ifndef CONFIG_USER_ONLY
6719 cpu_remove_sync(CPU(dev));
6720 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
6721 #endif
6722
6723 if (cpu->apic_state) {
6724 object_unparent(OBJECT(cpu->apic_state));
6725 cpu->apic_state = NULL;
6726 }
6727
6728 xcc->parent_unrealize(dev, &local_err);
6729 if (local_err != NULL) {
6730 error_propagate(errp, local_err);
6731 return;
6732 }
6733 }
6734
6735 typedef struct BitProperty {
6736 FeatureWord w;
6737 uint64_t mask;
6738 } BitProperty;
6739
6740 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
6741 void *opaque, Error **errp)
6742 {
6743 X86CPU *cpu = X86_CPU(obj);
6744 BitProperty *fp = opaque;
6745 uint64_t f = cpu->env.features[fp->w];
6746 bool value = (f & fp->mask) == fp->mask;
6747 visit_type_bool(v, name, &value, errp);
6748 }
6749
6750 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
6751 void *opaque, Error **errp)
6752 {
6753 DeviceState *dev = DEVICE(obj);
6754 X86CPU *cpu = X86_CPU(obj);
6755 BitProperty *fp = opaque;
6756 Error *local_err = NULL;
6757 bool value;
6758
6759 if (dev->realized) {
6760 qdev_prop_set_after_realize(dev, name, errp);
6761 return;
6762 }
6763
6764 visit_type_bool(v, name, &value, &local_err);
6765 if (local_err) {
6766 error_propagate(errp, local_err);
6767 return;
6768 }
6769
6770 if (value) {
6771 cpu->env.features[fp->w] |= fp->mask;
6772 } else {
6773 cpu->env.features[fp->w] &= ~fp->mask;
6774 }
6775 cpu->env.user_features[fp->w] |= fp->mask;
6776 }
6777
6778 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
6779 void *opaque)
6780 {
6781 BitProperty *prop = opaque;
6782 g_free(prop);
6783 }
6784
6785 /* Register a boolean property to get/set a single bit in a uint64_t field.
6786 *
6787 * The same property name can be registered multiple times to make it affect
6788 * multiple bits in the same FeatureWord. In that case, the getter will return
6789 * true only if all bits are set.
6790 */
6791 static void x86_cpu_register_bit_prop(X86CPU *cpu,
6792 const char *prop_name,
6793 FeatureWord w,
6794 int bitnr)
6795 {
6796 BitProperty *fp;
6797 ObjectProperty *op;
6798 uint64_t mask = (1ULL << bitnr);
6799
6800 op = object_property_find(OBJECT(cpu), prop_name, NULL);
6801 if (op) {
6802 fp = op->opaque;
6803 assert(fp->w == w);
6804 fp->mask |= mask;
6805 } else {
6806 fp = g_new0(BitProperty, 1);
6807 fp->w = w;
6808 fp->mask = mask;
6809 object_property_add(OBJECT(cpu), prop_name, "bool",
6810 x86_cpu_get_bit_prop,
6811 x86_cpu_set_bit_prop,
6812 x86_cpu_release_bit_prop, fp, &error_abort);
6813 }
6814 }
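/*
 * Hypothetical example of the multi-bit case described above: if one name
 * were registered for bits 3 and 5 of the same word, fp->mask would become
 * 0x28, the getter would return true only when both bits are set, and the
 * setter would set or clear both bits together.
 */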
6815
6816 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
6817 FeatureWord w,
6818 int bitnr)
6819 {
6820 FeatureWordInfo *fi = &feature_word_info[w];
6821 const char *name = fi->feat_names[bitnr];
6822
6823 if (!name) {
6824 return;
6825 }
6826
6827 /* Property names should use "-" instead of "_".
6828 * Old names containing underscores are registered as aliases
6829 * using object_property_add_alias()
6830 */
6831 assert(!strchr(name, '_'));
6832 /* aliases don't use "|" delimiters anymore, they are registered
6833 * manually using object_property_add_alias() */
6834 assert(!strchr(name, '|'));
6835 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
6836 }
6837
6838 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
6839 {
6840 X86CPU *cpu = X86_CPU(cs);
6841 CPUX86State *env = &cpu->env;
6842 GuestPanicInformation *panic_info = NULL;
6843
6844 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
6845 panic_info = g_malloc0(sizeof(GuestPanicInformation));
6846
6847 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
6848
6849 assert(HV_CRASH_PARAMS >= 5);
6850 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
6851 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
6852 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
6853 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
6854 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
6855 }
6856
6857 return panic_info;
6858 }
6859 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
6860 const char *name, void *opaque,
6861 Error **errp)
6862 {
6863 CPUState *cs = CPU(obj);
6864 GuestPanicInformation *panic_info;
6865
6866 if (!cs->crash_occurred) {
6867 error_setg(errp, "No crash occurred");
6868 return;
6869 }
6870
6871 panic_info = x86_cpu_get_crash_info(cs);
6872 if (panic_info == NULL) {
6873 error_setg(errp, "No crash information");
6874 return;
6875 }
6876
6877 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
6878 errp);
6879 qapi_free_GuestPanicInformation(panic_info);
6880 }
6881
6882 static void x86_cpu_initfn(Object *obj)
6883 {
6884 X86CPU *cpu = X86_CPU(obj);
6885 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
6886 CPUX86State *env = &cpu->env;
6887 FeatureWord w;
6888
6889 env->nr_dies = 1;
6890 env->nr_nodes = 1;
6891 cpu_set_cpustate_pointers(cpu);
6892
6893 object_property_add(obj, "family", "int",
6894 x86_cpuid_version_get_family,
6895 x86_cpuid_version_set_family, NULL, NULL, NULL);
6896 object_property_add(obj, "model", "int",
6897 x86_cpuid_version_get_model,
6898 x86_cpuid_version_set_model, NULL, NULL, NULL);
6899 object_property_add(obj, "stepping", "int",
6900 x86_cpuid_version_get_stepping,
6901 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
6902 object_property_add_str(obj, "vendor",
6903 x86_cpuid_get_vendor,
6904 x86_cpuid_set_vendor, NULL);
6905 object_property_add_str(obj, "model-id",
6906 x86_cpuid_get_model_id,
6907 x86_cpuid_set_model_id, NULL);
6908 object_property_add(obj, "tsc-frequency", "int",
6909 x86_cpuid_get_tsc_freq,
6910 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
6911 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
6912 x86_cpu_get_feature_words,
6913 NULL, NULL, (void *)env->features, NULL);
6914 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
6915 x86_cpu_get_feature_words,
6916 NULL, NULL, (void *)cpu->filtered_features, NULL);
6917 /*
6918 * The "unavailable-features" property has the same semantics as
6919 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
6920 * QMP command: they list the features that would have prevented the
6921 * CPU from running if the "enforce" flag was set.
6922 */
6923 object_property_add(obj, "unavailable-features", "strList",
6924 x86_cpu_get_unavailable_features,
6925 NULL, NULL, NULL, &error_abort);
6926
6927 object_property_add(obj, "crash-information", "GuestPanicInformation",
6928 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
6929
6930 for (w = 0; w < FEATURE_WORDS; w++) {
6931 int bitnr;
6932
6933 for (bitnr = 0; bitnr < 64; bitnr++) {
6934 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
6935 }
6936 }
6937
6938 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
6939 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
6940 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
6941 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
6942 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
6943 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
6944 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
6945
6946 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
6947 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
6948 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
6949 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
6950 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
6951 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
6952 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
6953 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
6954 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
6955 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
6956 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
6957 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
6958 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
6959 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
6960 object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control",
6961 &error_abort);
6962 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
6963 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
6964 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
6965 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
6966 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
6967 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
6968 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
6969
6970 if (xcc->model) {
6971 x86_cpu_load_model(cpu, xcc->model, &error_abort);
6972 }
6973 }
6974
6975 static int64_t x86_cpu_get_arch_id(CPUState *cs)
6976 {
6977 X86CPU *cpu = X86_CPU(cs);
6978
6979 return cpu->apic_id;
6980 }
6981
6982 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
6983 {
6984 X86CPU *cpu = X86_CPU(cs);
6985
6986 return cpu->env.cr[0] & CR0_PG_MASK;
6987 }
6988
6989 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
6990 {
6991 X86CPU *cpu = X86_CPU(cs);
6992
6993 cpu->env.eip = value;
6994 }
6995
6996 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
6997 {
6998 X86CPU *cpu = X86_CPU(cs);
6999
7000 cpu->env.eip = tb->pc - tb->cs_base;
7001 }
7002
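/*
 * Priority order implemented below: CPU_INTERRUPT_POLL (system emulation
 * only) and SIPI are reported unconditionally; the remaining sources
 * require GIF and are checked in the order SMI (unless already in SMM),
 * NMI (unless blocked), MCE, external interrupts (subject to IF and
 * virtual-interrupt masking), and finally VIRQ.
 */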
7003 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
7004 {
7005 X86CPU *cpu = X86_CPU(cs);
7006 CPUX86State *env = &cpu->env;
7007
7008 #if !defined(CONFIG_USER_ONLY)
7009 if (interrupt_request & CPU_INTERRUPT_POLL) {
7010 return CPU_INTERRUPT_POLL;
7011 }
7012 #endif
7013 if (interrupt_request & CPU_INTERRUPT_SIPI) {
7014 return CPU_INTERRUPT_SIPI;
7015 }
7016
7017 if (env->hflags2 & HF2_GIF_MASK) {
7018 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
7019 !(env->hflags & HF_SMM_MASK)) {
7020 return CPU_INTERRUPT_SMI;
7021 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
7022 !(env->hflags2 & HF2_NMI_MASK)) {
7023 return CPU_INTERRUPT_NMI;
7024 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
7025 return CPU_INTERRUPT_MCE;
7026 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
7027 (((env->hflags2 & HF2_VINTR_MASK) &&
7028 (env->hflags2 & HF2_HIF_MASK)) ||
7029 (!(env->hflags2 & HF2_VINTR_MASK) &&
7030 (env->eflags & IF_MASK &&
7031 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
7032 return CPU_INTERRUPT_HARD;
7033 #if !defined(CONFIG_USER_ONLY)
7034 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
7035 (env->eflags & IF_MASK) &&
7036 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
7037 return CPU_INTERRUPT_VIRQ;
7038 #endif
7039 }
7040 }
7041
7042 return 0;
7043 }
7044
7045 static bool x86_cpu_has_work(CPUState *cs)
7046 {
7047 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
7048 }
7049
7050 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
7051 {
7052 X86CPU *cpu = X86_CPU(cs);
7053 CPUX86State *env = &cpu->env;
7054
7055 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
7056 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
7057 : bfd_mach_i386_i8086);
7058 info->print_insn = print_insn_i386;
7059
7060 info->cap_arch = CS_ARCH_X86;
7061 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
7062 : env->hflags & HF_CS32_MASK ? CS_MODE_32
7063 : CS_MODE_16);
7064 info->cap_insn_unit = 1;
7065 info->cap_insn_split = 8;
7066 }
7067
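/*
 * Recompute the hflags bits that are derived from other CPU state: CPL from
 * SS.DPL, PE/MP/EM/TS from CR0, TF/VM/IOPL from EFLAGS, OSFXSR from CR4,
 * LMA from EFER, and the CS32/SS32/CS64/ADDSEG flags from the segment
 * registers and mode bits. Only the flags masked out by HFLAG_COPY_MASK are
 * recalculated; the rest of env->hflags is carried over unchanged.
 */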
7068 void x86_update_hflags(CPUX86State *env)
7069 {
7070 uint32_t hflags;
7071 #define HFLAG_COPY_MASK \
7072 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
7073 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
7074 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
7075 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
7076
7077 hflags = env->hflags & HFLAG_COPY_MASK;
7078 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
7079 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
7080 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
7081 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
7082 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
7083
7084 if (env->cr[4] & CR4_OSFXSR_MASK) {
7085 hflags |= HF_OSFXSR_MASK;
7086 }
7087
7088 if (env->efer & MSR_EFER_LMA) {
7089 hflags |= HF_LMA_MASK;
7090 }
7091
7092 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
7093 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
7094 } else {
7095 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
7096 (DESC_B_SHIFT - HF_CS32_SHIFT);
7097 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
7098 (DESC_B_SHIFT - HF_SS32_SHIFT);
7099 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
7100 !(hflags & HF_CS32_MASK)) {
7101 hflags |= HF_ADDSEG_MASK;
7102 } else {
7103 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
7104 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
7105 }
7106 }
7107 env->hflags = hflags;
7108 }
7109
7110 static Property x86_cpu_properties[] = {
7111 #ifdef CONFIG_USER_ONLY
7112 /* apic_id = 0 by default for *-user, see commit 9886e834 */
7113 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
7114 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
7115 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
7116 DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
7117 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
7118 #else
7119 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
7120 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
7121 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
7122 DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
7123 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
7124 #endif
7125 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
7126 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
7127
7128 DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
7129 HYPERV_SPINLOCK_NEVER_RETRY),
7130 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
7131 HYPERV_FEAT_RELAXED, 0),
7132 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
7133 HYPERV_FEAT_VAPIC, 0),
7134 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
7135 HYPERV_FEAT_TIME, 0),
7136 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
7137 HYPERV_FEAT_CRASH, 0),
7138 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
7139 HYPERV_FEAT_RESET, 0),
7140 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
7141 HYPERV_FEAT_VPINDEX, 0),
7142 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
7143 HYPERV_FEAT_RUNTIME, 0),
7144 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
7145 HYPERV_FEAT_SYNIC, 0),
7146 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
7147 HYPERV_FEAT_STIMER, 0),
7148 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
7149 HYPERV_FEAT_FREQUENCIES, 0),
7150 DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
7151 HYPERV_FEAT_REENLIGHTENMENT, 0),
7152 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
7153 HYPERV_FEAT_TLBFLUSH, 0),
7154 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
7155 HYPERV_FEAT_EVMCS, 0),
7156 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
7157 HYPERV_FEAT_IPI, 0),
7158 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
7159 HYPERV_FEAT_STIMER_DIRECT, 0),
7160 DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU,
7161 hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF),
7162 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
7163
7164 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
7165 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
7166 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
7167 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
7168 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
7169 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
7170 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
7171 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
7172 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7,
7173 UINT32_MAX),
7174 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
7175 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
7176 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
7177 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
7178 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
7179 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
7180 DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0),
7181 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
7182 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
7183 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
7184 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
7185 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
7186 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
7187 false),
7188 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
7189 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
7190 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
7191 true),
7192 /*
7193 * legacy_cache defaults to true unless the CPU model provides its
7194 * own cache information (see x86_cpu_load_model()).
7195 */
7196 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
7197
7198 /*
7199 * From "Requirements for Implementing the Microsoft
7200 * Hypervisor Interface":
7201 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
7202 *
7203 * "Starting with Windows Server 2012 and Windows 8, if
7204 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
7205 * the hypervisor imposes no specific limit to the number of VPs.
7206 * In this case, Windows Server 2012 guest VMs may use more than
7207 * 64 VPs, up to the maximum supported number of processors applicable
7208 * to the specific Windows version being used."
7209 */
7210 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
7211 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
7212 false),
7213 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
7214 true),
7215 DEFINE_PROP_END_OF_LIST()
7216 };
7217
7218 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
7219 {
7220 X86CPUClass *xcc = X86_CPU_CLASS(oc);
7221 CPUClass *cc = CPU_CLASS(oc);
7222 DeviceClass *dc = DEVICE_CLASS(oc);
7223
7224 device_class_set_parent_realize(dc, x86_cpu_realizefn,
7225 &xcc->parent_realize);
7226 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
7227 &xcc->parent_unrealize);
7228 device_class_set_props(dc, x86_cpu_properties);
7229
7230 device_class_set_parent_reset(dc, x86_cpu_reset, &xcc->parent_reset);
7231 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
7232
7233 cc->class_by_name = x86_cpu_class_by_name;
7234 cc->parse_features = x86_cpu_parse_featurestr;
7235 cc->has_work = x86_cpu_has_work;
7236 #ifdef CONFIG_TCG
7237 cc->do_interrupt = x86_cpu_do_interrupt;
7238 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
7239 #endif
7240 cc->dump_state = x86_cpu_dump_state;
7241 cc->get_crash_info = x86_cpu_get_crash_info;
7242 cc->set_pc = x86_cpu_set_pc;
7243 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
7244 cc->gdb_read_register = x86_cpu_gdb_read_register;
7245 cc->gdb_write_register = x86_cpu_gdb_write_register;
7246 cc->get_arch_id = x86_cpu_get_arch_id;
7247 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
7248 #ifndef CONFIG_USER_ONLY
7249 cc->asidx_from_attrs = x86_asidx_from_attrs;
7250 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
7251 cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug;
7252 cc->write_elf64_note = x86_cpu_write_elf64_note;
7253 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
7254 cc->write_elf32_note = x86_cpu_write_elf32_note;
7255 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
7256 cc->vmsd = &vmstate_x86_cpu;
7257 #endif
7258 cc->gdb_arch_name = x86_gdb_arch_name;
7259 #ifdef TARGET_X86_64
7260 cc->gdb_core_xml_file = "i386-64bit.xml";
7261 cc->gdb_num_core_regs = 66;
7262 #else
7263 cc->gdb_core_xml_file = "i386-32bit.xml";
7264 cc->gdb_num_core_regs = 50;
7265 #endif
7266 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
7267 cc->debug_excp_handler = breakpoint_handler;
7268 #endif
7269 cc->cpu_exec_enter = x86_cpu_exec_enter;
7270 cc->cpu_exec_exit = x86_cpu_exec_exit;
7271 #ifdef CONFIG_TCG
7272 cc->tcg_initialize = tcg_x86_init;
7273 cc->tlb_fill = x86_cpu_tlb_fill;
7274 #endif
7275 cc->disas_set_info = x86_disas_set_info;
7276
7277 dc->user_creatable = true;
7278 }
7279
7280 static const TypeInfo x86_cpu_type_info = {
7281 .name = TYPE_X86_CPU,
7282 .parent = TYPE_CPU,
7283 .instance_size = sizeof(X86CPU),
7284 .instance_init = x86_cpu_initfn,
7285 .abstract = true,
7286 .class_size = sizeof(X86CPUClass),
7287 .class_init = x86_cpu_common_class_init,
7288 };
7289
7290
7291 /* "base" CPU model, used by query-cpu-model-expansion */
7292 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
7293 {
7294 X86CPUClass *xcc = X86_CPU_CLASS(oc);
7295
7296 xcc->static_model = true;
7297 xcc->migration_safe = true;
7298 xcc->model_description = "base CPU model type with no features enabled";
7299 xcc->ordering = 8;
7300 }
7301
7302 static const TypeInfo x86_base_cpu_type_info = {
7303 .name = X86_CPU_TYPE_NAME("base"),
7304 .parent = TYPE_X86_CPU,
7305 .class_init = x86_cpu_base_class_init,
7306 };
7307
7308 static void x86_cpu_register_types(void)
7309 {
7310 int i;
7311
7312 type_register_static(&x86_cpu_type_info);
7313 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
7314 x86_register_cpudef_types(&builtin_x86_defs[i]);
7315 }
7316 type_register_static(&max_x86_cpu_type_info);
7317 type_register_static(&x86_base_cpu_type_info);
7318 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
7319 type_register_static(&host_x86_cpu_type_info);
7320 #endif
7321 }
7322
7323 type_init(x86_cpu_register_types)