1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24 #include "qemu/qemu-print.h"
25
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/reset.h"
30 #include "sysemu/hvf.h"
31 #include "sysemu/cpus.h"
32 #include "sysemu/xen.h"
33 #include "kvm_i386.h"
34 #include "sev_i386.h"
35
36 #include "qemu/error-report.h"
37 #include "qemu/module.h"
38 #include "qemu/option.h"
39 #include "qemu/config-file.h"
40 #include "qapi/error.h"
41 #include "qapi/qapi-visit-machine.h"
42 #include "qapi/qapi-visit-run-state.h"
43 #include "qapi/qmp/qdict.h"
44 #include "qapi/qmp/qerror.h"
45 #include "qapi/visitor.h"
46 #include "qom/qom-qobject.h"
47 #include "sysemu/arch_init.h"
48 #include "qapi/qapi-commands-machine-target.h"
49
50 #include "standard-headers/asm-x86/kvm_para.h"
51
52 #include "sysemu/sysemu.h"
53 #include "sysemu/tcg.h"
54 #include "hw/qdev-properties.h"
55 #include "hw/i386/topology.h"
56 #ifndef CONFIG_USER_ONLY
57 #include "exec/address-spaces.h"
58 #include "hw/i386/apic_internal.h"
59 #include "hw/boards.h"
60 #endif
61
62 #include "disas/capstone.h"
63
64 /* Helpers for building CPUID[2] descriptors: */
65
66 struct CPUID2CacheDescriptorInfo {
67 enum CacheType type;
68 int level;
69 int size;
70 int line_size;
71 int associativity;
72 };
73
74 /*
75 * Known CPUID 2 cache descriptors.
76 * From Intel SDM Volume 2A, CPUID instruction
77 */
78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
80 .associativity = 4, .line_size = 32, },
81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
82 .associativity = 4, .line_size = 32, },
83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
84 .associativity = 4, .line_size = 64, },
85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
86 .associativity = 2, .line_size = 32, },
87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
88 .associativity = 4, .line_size = 32, },
89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
90 .associativity = 4, .line_size = 64, },
91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
92 .associativity = 6, .line_size = 64, },
93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
94 .associativity = 2, .line_size = 64, },
95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
96 .associativity = 8, .line_size = 64, },
97 /* lines per sector is not supported by cpuid2_cache_descriptor(),
98 * so descriptors 0x22, 0x23 are not included
99 */
100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
101 .associativity = 16, .line_size = 64, },
102 /* lines per sector is not supported by cpuid2_cache_descriptor(),
103 * so descriptors 0x25, 0x20 are not included
104 */
105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
106 .associativity = 8, .line_size = 64, },
107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
108 .associativity = 8, .line_size = 64, },
109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
112 .associativity = 4, .line_size = 32, },
113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
114 .associativity = 4, .line_size = 32, },
115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
116 .associativity = 4, .line_size = 32, },
117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
118 .associativity = 4, .line_size = 32, },
119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
120 .associativity = 4, .line_size = 64, },
121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
122 .associativity = 8, .line_size = 64, },
123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
124 .associativity = 12, .line_size = 64, },
125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
131 .associativity = 12, .line_size = 64, },
132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
133 .associativity = 16, .line_size = 64, },
134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
135 .associativity = 24, .line_size = 64, },
136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
137 .associativity = 8, .line_size = 64, },
138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
141 .associativity = 4, .line_size = 64, },
142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
143 .associativity = 4, .line_size = 64, },
144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
145 .associativity = 4, .line_size = 64, },
146 /* lines per sector is not supported by cpuid2_cache_descriptor(),
147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
148 */
149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
150 .associativity = 8, .line_size = 64, },
151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
152 .associativity = 2, .line_size = 64, },
153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 64, },
155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
156 .associativity = 8, .line_size = 32, },
157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
158 .associativity = 8, .line_size = 32, },
159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
160 .associativity = 8, .line_size = 32, },
161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
162 .associativity = 8, .line_size = 32, },
163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 8, .line_size = 64, },
167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 4, .line_size = 64, },
171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 4, .line_size = 64, },
173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
176 .associativity = 8, .line_size = 64, },
177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
178 .associativity = 8, .line_size = 64, },
179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
182 .associativity = 12, .line_size = 64, },
183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
184 .associativity = 12, .line_size = 64, },
185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
188 .associativity = 16, .line_size = 64, },
189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
190 .associativity = 16, .line_size = 64, },
191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
192 .associativity = 24, .line_size = 64, },
193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
194 .associativity = 24, .line_size = 64, },
195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
196 .associativity = 24, .line_size = 64, },
197 };
198
199 /*
200 * "CPUID leaf 2 does not report cache descriptor information,
201 * use CPUID leaf 4 to query cache parameters"
202 */
203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
204
205 /*
206 * Return a CPUID 2 cache descriptor for a given cache.
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
208 */
209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
210 {
211 int i;
212
213 assert(cache->size > 0);
214 assert(cache->level > 0);
215 assert(cache->line_size > 0);
216 assert(cache->associativity > 0);
217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
219 if (d->level == cache->level && d->type == cache->type &&
220 d->size == cache->size && d->line_size == cache->line_size &&
221 d->associativity == cache->associativity) {
222 return i;
223 }
224 }
225
226 return CACHE_DESCRIPTOR_UNAVAILABLE;
227 }
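/*
 * [Illustrative sketch, not part of the original cpu.c]
 * Example of how the lookup above resolves: a 2 MiB, 8-way unified L2
 * with 64-byte lines (the geometry of legacy_l2_cache_cpuid2 defined
 * later in this file) matches cpuid2_cache_descriptors[0x7D], so the
 * function returns 0x7D. The helper name below is hypothetical.
 */
static uint8_t example_cpuid2_descriptor_lookup(void)
{
    CPUCacheInfo l2 = {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 2 * MiB,
        .line_size = 64,
        .associativity = 8,
    };

    /* Walks the descriptor table and returns the matching index, here 0x7D */
    return cpuid2_cache_descriptor(&l2);
}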
228
229 /* CPUID Leaf 4 constants: */
230
231 /* EAX: */
232 #define CACHE_TYPE_D 1
233 #define CACHE_TYPE_I 2
234 #define CACHE_TYPE_UNIFIED 3
235
236 #define CACHE_LEVEL(l) (l << 5)
237
238 #define CACHE_SELF_INIT_LEVEL (1 << 8)
239
240 /* EDX: */
241 #define CACHE_NO_INVD_SHARING (1 << 0)
242 #define CACHE_INCLUSIVE (1 << 1)
243 #define CACHE_COMPLEX_IDX (1 << 2)
244
245 /* Encode CacheType for CPUID[4].EAX */
246 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
247 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
248 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
249 0 /* Invalid value */)
250
251
252 /* Encode cache info for CPUID[4] */
253 static void encode_cache_cpuid4(CPUCacheInfo *cache,
254 int num_apic_ids, int num_cores,
255 uint32_t *eax, uint32_t *ebx,
256 uint32_t *ecx, uint32_t *edx)
257 {
258 assert(cache->size == cache->line_size * cache->associativity *
259 cache->partitions * cache->sets);
260
261 assert(num_apic_ids > 0);
262 *eax = CACHE_TYPE(cache->type) |
263 CACHE_LEVEL(cache->level) |
264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
265 ((num_cores - 1) << 26) |
266 ((num_apic_ids - 1) << 14);
267
268 assert(cache->line_size > 0);
269 assert(cache->partitions > 0);
270 assert(cache->associativity > 0);
271 /* We don't implement fully-associative caches */
272 assert(cache->associativity < cache->sets);
273 *ebx = (cache->line_size - 1) |
274 ((cache->partitions - 1) << 12) |
275 ((cache->associativity - 1) << 22);
276
277 assert(cache->sets > 0);
278 *ecx = cache->sets - 1;
279
280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
281 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
283 }
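/*
 * [Illustrative sketch, not part of the original cpu.c]
 * The inverse of the packing done by encode_cache_cpuid4(): a guest
 * recovers the cache size from EBX/ECX as
 * line_size * partitions * ways * sets, each field being stored minus
 * one. The helper name is hypothetical.
 */
static uint32_t example_decode_cpuid4_size(uint32_t ebx, uint32_t ecx)
{
    uint32_t line_size     = (ebx & 0xfff) + 1;          /* EBX[11:0]  */
    uint32_t partitions    = ((ebx >> 12) & 0x3ff) + 1;  /* EBX[21:12] */
    uint32_t associativity = ((ebx >> 22) & 0x3ff) + 1;  /* EBX[31:22] */
    uint32_t sets          = ecx + 1;                    /* ECX[31:0]  */

    return line_size * partitions * associativity * sets;
}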
284
285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
287 {
288 assert(cache->size % 1024 == 0);
289 assert(cache->lines_per_tag > 0);
290 assert(cache->associativity > 0);
291 assert(cache->line_size > 0);
292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
293 (cache->lines_per_tag << 8) | (cache->line_size);
294 }
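/*
 * [Illustrative worked example, not part of the original cpu.c]
 * For the AMD legacy L1D cache defined later in this file (64 KiB,
 * 2-way, 1 line per tag, 64-byte lines), encode_cache_cpuid80000005()
 * produces (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140.
 */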
295
296 #define ASSOC_FULL 0xFF
297
298 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
299 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
300 a == 2 ? 0x2 : \
301 a == 4 ? 0x4 : \
302 a == 8 ? 0x6 : \
303 a == 16 ? 0x8 : \
304 a == 32 ? 0xA : \
305 a == 48 ? 0xB : \
306 a == 64 ? 0xC : \
307 a == 96 ? 0xD : \
308 a == 128 ? 0xE : \
309 a == ASSOC_FULL ? 0xF : \
310 0 /* invalid value */)
311
312 /*
313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
314 * @l3 can be NULL.
315 */
316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
317 CPUCacheInfo *l3,
318 uint32_t *ecx, uint32_t *edx)
319 {
320 assert(l2->size % 1024 == 0);
321 assert(l2->associativity > 0);
322 assert(l2->lines_per_tag > 0);
323 assert(l2->line_size > 0);
324 *ecx = ((l2->size / 1024) << 16) |
325 (AMD_ENC_ASSOC(l2->associativity) << 12) |
326 (l2->lines_per_tag << 8) | (l2->line_size);
327
328 if (l3) {
329 assert(l3->size % (512 * 1024) == 0);
330 assert(l3->associativity > 0);
331 assert(l3->lines_per_tag > 0);
332 assert(l3->line_size > 0);
333 *edx = ((l3->size / (512 * 1024)) << 18) |
334 (AMD_ENC_ASSOC(l3->associativity) << 12) |
335 (l3->lines_per_tag << 8) | (l3->line_size);
336 } else {
337 *edx = 0;
338 }
339 }
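/*
 * [Illustrative worked example, not part of the original cpu.c]
 * For the AMD legacy L2 cache defined later in this file (512 KiB,
 * 16-way, 1 line per tag, 64-byte lines), AMD_ENC_ASSOC(16) = 0x8 and
 * ECX becomes (512 << 16) | (0x8 << 12) | (1 << 8) | 64 = 0x02008140.
 */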
340
341 /* Encode cache info for CPUID[8000001D] */
342 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache,
343 X86CPUTopoInfo *topo_info,
344 uint32_t *eax, uint32_t *ebx,
345 uint32_t *ecx, uint32_t *edx)
346 {
347 uint32_t l3_cores;
348 unsigned nodes = MAX(topo_info->nodes_per_pkg, 1);
349
350 assert(cache->size == cache->line_size * cache->associativity *
351 cache->partitions * cache->sets);
352
353 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
354 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
355
356 /* L3 is shared among multiple cores */
357 if (cache->level == 3) {
358 l3_cores = DIV_ROUND_UP((topo_info->dies_per_pkg *
359 topo_info->cores_per_die *
360 topo_info->threads_per_core),
361 nodes);
362 *eax |= (l3_cores - 1) << 14;
363 } else {
364 *eax |= ((topo_info->threads_per_core - 1) << 14);
365 }
366
367 assert(cache->line_size > 0);
368 assert(cache->partitions > 0);
369 assert(cache->associativity > 0);
370 /* We don't implement fully-associative caches */
371 assert(cache->associativity < cache->sets);
372 *ebx = (cache->line_size - 1) |
373 ((cache->partitions - 1) << 12) |
374 ((cache->associativity - 1) << 22);
375
376 assert(cache->sets > 0);
377 *ecx = cache->sets - 1;
378
379 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
380 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
381 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
382 }
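/*
 * [Illustrative worked example, not part of the original cpu.c]
 * For an L3 in a package with 1 die, 8 cores per die, 2 threads per core
 * and 2 NUMA nodes, l3_cores = DIV_ROUND_UP(1 * 8 * 2, 2) = 8, so
 * EAX[25:14] is set to 7, i.e. "8 logical processors share this cache".
 */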
383
384 /* Encode cache info for CPUID[8000001E] */
385 static void encode_topo_cpuid8000001e(X86CPUTopoInfo *topo_info, X86CPU *cpu,
386 uint32_t *eax, uint32_t *ebx,
387 uint32_t *ecx, uint32_t *edx)
388 {
389 X86CPUTopoIDs topo_ids = {0};
390 unsigned long nodes = MAX(topo_info->nodes_per_pkg, 1);
391 int shift;
392
393 x86_topo_ids_from_apicid_epyc(cpu->apic_id, topo_info, &topo_ids);
394
395 *eax = cpu->apic_id;
396 /*
397 * CPUID_Fn8000001E_EBX
398 * 31:16 Reserved
399 * 15:8 Threads per core (the actual number of threads per core is
400 * this field + 1)
401 * 7:0 Core id (see bit decoding below)
402 * SMT:
403 * 4:3 node id
404 * 2 Core complex id
405 * 1:0 Core id
406 * Non SMT:
407 * 5:4 node id
408 * 3 Core complex id
409 * 1:0 Core id
410 */
411 *ebx = ((topo_info->threads_per_core - 1) << 8) | (topo_ids.node_id << 3) |
412 (topo_ids.core_id);
413 /*
414 * CPUID_Fn8000001E_ECX
415 * 31:11 Reserved
416 * 10:8 Nodes per processor (the actual node count is this field + 1)
417 * 7:0 Node id (see bit decoding below)
418 * 2 Socket id
419 * 1:0 Node id
420 */
421 if (nodes <= 4) {
422 *ecx = ((nodes - 1) << 8) | (topo_ids.pkg_id << 2) | topo_ids.node_id;
423 } else {
424 /*
425 * Node id fixup. Actual hardware supports up to 4 nodes, but with
426 * more than 32 cores we may end up with more than 4 nodes.
427 * The node id here is a combination of socket id and node id. The
428 * only requirement is that this number be unique across the system.
429 * Shift the socket id to accommodate more nodes. We don't expect both
430 * the socket id and the node id to be large at the same time; this is
431 * not an ideal config but we need to support it. The maximum number
432 * of nodes is 32 (255/8) with 8 cores per node and 255 cores, so 5
433 * bits are enough. Find the left-most set bit to represent the total
434 * number of nodes; find_last_bit() returns the last set bit (0 based).
435 * Left-shift (+1) the socket id to make room for all the nodes.
436 */
437 nodes -= 1;
438 shift = find_last_bit(&nodes, 8);
439 *ecx = (nodes << 8) | (topo_ids.pkg_id << (shift + 1)) |
440 topo_ids.node_id;
441 }
442 *edx = 0;
443 }
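/*
 * [Illustrative worked example, not part of the original cpu.c]
 * For 6 nodes per package the fixup path above computes nodes = 5
 * (0b101), find_last_bit() returns 2, and the socket id is therefore
 * shifted left by 3 bits before being OR-ed with the node id.
 */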
444
445 /*
446 * Definitions of the hardcoded cache entries we expose:
447 * These are legacy cache values. If there is a need to change any
448 * of these values, please use builtin_x86_defs
449 */
450
451 /* L1 data cache: */
452 static CPUCacheInfo legacy_l1d_cache = {
453 .type = DATA_CACHE,
454 .level = 1,
455 .size = 32 * KiB,
456 .self_init = 1,
457 .line_size = 64,
458 .associativity = 8,
459 .sets = 64,
460 .partitions = 1,
461 .no_invd_sharing = true,
462 };
463
464 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
465 static CPUCacheInfo legacy_l1d_cache_amd = {
466 .type = DATA_CACHE,
467 .level = 1,
468 .size = 64 * KiB,
469 .self_init = 1,
470 .line_size = 64,
471 .associativity = 2,
472 .sets = 512,
473 .partitions = 1,
474 .lines_per_tag = 1,
475 .no_invd_sharing = true,
476 };
477
478 /* L1 instruction cache: */
479 static CPUCacheInfo legacy_l1i_cache = {
480 .type = INSTRUCTION_CACHE,
481 .level = 1,
482 .size = 32 * KiB,
483 .self_init = 1,
484 .line_size = 64,
485 .associativity = 8,
486 .sets = 64,
487 .partitions = 1,
488 .no_invd_sharing = true,
489 };
490
491 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
492 static CPUCacheInfo legacy_l1i_cache_amd = {
493 .type = INSTRUCTION_CACHE,
494 .level = 1,
495 .size = 64 * KiB,
496 .self_init = 1,
497 .line_size = 64,
498 .associativity = 2,
499 .sets = 512,
500 .partitions = 1,
501 .lines_per_tag = 1,
502 .no_invd_sharing = true,
503 };
504
505 /* Level 2 unified cache: */
506 static CPUCacheInfo legacy_l2_cache = {
507 .type = UNIFIED_CACHE,
508 .level = 2,
509 .size = 4 * MiB,
510 .self_init = 1,
511 .line_size = 64,
512 .associativity = 16,
513 .sets = 4096,
514 .partitions = 1,
515 .no_invd_sharing = true,
516 };
517
518 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
519 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
520 .type = UNIFIED_CACHE,
521 .level = 2,
522 .size = 2 * MiB,
523 .line_size = 64,
524 .associativity = 8,
525 };
526
527
528 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
529 static CPUCacheInfo legacy_l2_cache_amd = {
530 .type = UNIFIED_CACHE,
531 .level = 2,
532 .size = 512 * KiB,
533 .line_size = 64,
534 .lines_per_tag = 1,
535 .associativity = 16,
536 .sets = 512,
537 .partitions = 1,
538 };
539
540 /* Level 3 unified cache: */
541 static CPUCacheInfo legacy_l3_cache = {
542 .type = UNIFIED_CACHE,
543 .level = 3,
544 .size = 16 * MiB,
545 .line_size = 64,
546 .associativity = 16,
547 .sets = 16384,
548 .partitions = 1,
549 .lines_per_tag = 1,
550 .self_init = true,
551 .inclusive = true,
552 .complex_indexing = true,
553 };
554
555 /* TLB definitions: */
556
557 #define L1_DTLB_2M_ASSOC 1
558 #define L1_DTLB_2M_ENTRIES 255
559 #define L1_DTLB_4K_ASSOC 1
560 #define L1_DTLB_4K_ENTRIES 255
561
562 #define L1_ITLB_2M_ASSOC 1
563 #define L1_ITLB_2M_ENTRIES 255
564 #define L1_ITLB_4K_ASSOC 1
565 #define L1_ITLB_4K_ENTRIES 255
566
567 #define L2_DTLB_2M_ASSOC 0 /* disabled */
568 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
569 #define L2_DTLB_4K_ASSOC 4
570 #define L2_DTLB_4K_ENTRIES 512
571
572 #define L2_ITLB_2M_ASSOC 0 /* disabled */
573 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
574 #define L2_ITLB_4K_ASSOC 4
575 #define L2_ITLB_4K_ENTRIES 512
576
577 /* CPUID Leaf 0x14 constants: */
578 #define INTEL_PT_MAX_SUBLEAF 0x1
579 /*
580 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
581 * MSR can be accessed;
582 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
583 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
584 * of Intel PT MSRs across warm reset;
585 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
586 */
587 #define INTEL_PT_MINIMAL_EBX 0xf
588 /*
589 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
590 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
591 * accessed;
592 * bit[01]: ToPA tables can hold any number of output entries, up to the
593 * maximum allowed by the MaskOrTableOffset field of
594 * IA32_RTIT_OUTPUT_MASK_PTRS;
595 * bit[02]: Support Single-Range Output scheme;
596 */
597 #define INTEL_PT_MINIMAL_ECX 0x7
598 /* generated packets which contain IP payloads have LIP values */
599 #define INTEL_PT_IP_LIP (1 << 31)
600 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
601 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
602 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
603 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
604 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
605
606 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
607 uint32_t vendor2, uint32_t vendor3)
608 {
609 int i;
610 for (i = 0; i < 4; i++) {
611 dst[i] = vendor1 >> (8 * i);
612 dst[i + 4] = vendor2 >> (8 * i);
613 dst[i + 8] = vendor3 >> (8 * i);
614 }
615 dst[CPUID_VENDOR_SZ] = '\0';
616 }
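/*
 * [Illustrative sketch, not part of the original cpu.c]
 * The CPUID.0 register values of an Intel part reassemble into the
 * familiar vendor string. The helper name is hypothetical.
 */
static void example_vendor_string(void)
{
    char vendor[CPUID_VENDOR_SZ + 1];

    /* EBX = "Genu", EDX = "ineI", ECX = "ntel" on Intel hardware */
    x86_cpu_vendor_words2str(vendor, 0x756e6547, 0x49656e69, 0x6c65746e);
    /* vendor now holds "GenuineIntel" */
    (void)vendor;
}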
617
618 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
619 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
620 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
621 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
622 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
623 CPUID_PSE36 | CPUID_FXSR)
624 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
625 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
626 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
627 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
628 CPUID_PAE | CPUID_SEP | CPUID_APIC)
629
630 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
631 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
632 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
633 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
634 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
635 /* partly implemented:
636 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
637 /* missing:
638 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
639 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
640 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
641 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
642 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
643 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
644 CPUID_EXT_RDRAND)
645 /* missing:
646 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
647 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
648 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
649 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
650 CPUID_EXT_F16C */
651
652 #ifdef TARGET_X86_64
653 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
654 #else
655 #define TCG_EXT2_X86_64_FEATURES 0
656 #endif
657
658 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
659 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
660 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
661 TCG_EXT2_X86_64_FEATURES)
662 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
663 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
664 #define TCG_EXT4_FEATURES 0
665 #define TCG_SVM_FEATURES CPUID_SVM_NPT
666 #define TCG_KVM_FEATURES 0
667 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
668 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
669 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
670 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
671 CPUID_7_0_EBX_ERMS)
672 /* missing:
673 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
674 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
675 CPUID_7_0_EBX_RDSEED */
676 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
677 /* CPUID_7_0_ECX_OSPKE is dynamic */ \
678 CPUID_7_0_ECX_LA57)
679 #define TCG_7_0_EDX_FEATURES 0
680 #define TCG_7_1_EAX_FEATURES 0
681 #define TCG_APM_FEATURES 0
682 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
683 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
684 /* missing:
685 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
686
687 typedef enum FeatureWordType {
688 CPUID_FEATURE_WORD,
689 MSR_FEATURE_WORD,
690 } FeatureWordType;
691
692 typedef struct FeatureWordInfo {
693 FeatureWordType type;
694 /* feature flag names are taken from "Intel Processor Identification and
695 * the CPUID Instruction" and AMD's "CPUID Specification".
696 * In cases of disagreement between feature naming conventions,
697 * aliases may be added.
698 */
699 const char *feat_names[64];
700 union {
701 /* If type==CPUID_FEATURE_WORD */
702 struct {
703 uint32_t eax; /* Input EAX for CPUID */
704 bool needs_ecx; /* CPUID instruction uses ECX as input */
705 uint32_t ecx; /* Input ECX value for CPUID */
706 int reg; /* output register (R_* constant) */
707 } cpuid;
708 /* If type==MSR_FEATURE_WORD */
709 struct {
710 uint32_t index;
711 } msr;
712 };
713 uint64_t tcg_features; /* Feature flags supported by TCG */
714 uint64_t unmigratable_flags; /* Feature flags known to be unmigratable */
715 uint64_t migratable_flags; /* Feature flags known to be migratable */
716 /* Features that shouldn't be auto-enabled by "-cpu host" */
717 uint64_t no_autoenable_flags;
718 } FeatureWordInfo;
719
720 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
721 [FEAT_1_EDX] = {
722 .type = CPUID_FEATURE_WORD,
723 .feat_names = {
724 "fpu", "vme", "de", "pse",
725 "tsc", "msr", "pae", "mce",
726 "cx8", "apic", NULL, "sep",
727 "mtrr", "pge", "mca", "cmov",
728 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
729 NULL, "ds" /* Intel dts */, "acpi", "mmx",
730 "fxsr", "sse", "sse2", "ss",
731 "ht" /* Intel htt */, "tm", "ia64", "pbe",
732 },
733 .cpuid = {.eax = 1, .reg = R_EDX, },
734 .tcg_features = TCG_FEATURES,
735 },
736 [FEAT_1_ECX] = {
737 .type = CPUID_FEATURE_WORD,
738 .feat_names = {
739 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
740 "ds-cpl", "vmx", "smx", "est",
741 "tm2", "ssse3", "cid", NULL,
742 "fma", "cx16", "xtpr", "pdcm",
743 NULL, "pcid", "dca", "sse4.1",
744 "sse4.2", "x2apic", "movbe", "popcnt",
745 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
746 "avx", "f16c", "rdrand", "hypervisor",
747 },
748 .cpuid = { .eax = 1, .reg = R_ECX, },
749 .tcg_features = TCG_EXT_FEATURES,
750 },
751 /* Feature names that are already defined in feature_name[] but
752 * are set in CPUID[8000_0001].EDX on AMD CPUs don't have their
753 * names listed in feat_names below. They are copied automatically
754 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
755 */
756 [FEAT_8000_0001_EDX] = {
757 .type = CPUID_FEATURE_WORD,
758 .feat_names = {
759 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
760 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
761 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
762 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
763 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
764 "nx", NULL, "mmxext", NULL /* mmx */,
765 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
766 NULL, "lm", "3dnowext", "3dnow",
767 },
768 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
769 .tcg_features = TCG_EXT2_FEATURES,
770 },
771 [FEAT_8000_0001_ECX] = {
772 .type = CPUID_FEATURE_WORD,
773 .feat_names = {
774 "lahf-lm", "cmp-legacy", "svm", "extapic",
775 "cr8legacy", "abm", "sse4a", "misalignsse",
776 "3dnowprefetch", "osvw", "ibs", "xop",
777 "skinit", "wdt", NULL, "lwp",
778 "fma4", "tce", NULL, "nodeid-msr",
779 NULL, "tbm", "topoext", "perfctr-core",
780 "perfctr-nb", NULL, NULL, NULL,
781 NULL, NULL, NULL, NULL,
782 },
783 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
784 .tcg_features = TCG_EXT3_FEATURES,
785 /*
786 * TOPOEXT is always allowed but can't be enabled blindly by
787 * "-cpu host", as it requires consistent cache topology info
788 * to be provided so it doesn't confuse guests.
789 */
790 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
791 },
792 [FEAT_C000_0001_EDX] = {
793 .type = CPUID_FEATURE_WORD,
794 .feat_names = {
795 NULL, NULL, "xstore", "xstore-en",
796 NULL, NULL, "xcrypt", "xcrypt-en",
797 "ace2", "ace2-en", "phe", "phe-en",
798 "pmm", "pmm-en", NULL, NULL,
799 NULL, NULL, NULL, NULL,
800 NULL, NULL, NULL, NULL,
801 NULL, NULL, NULL, NULL,
802 NULL, NULL, NULL, NULL,
803 },
804 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
805 .tcg_features = TCG_EXT4_FEATURES,
806 },
807 [FEAT_KVM] = {
808 .type = CPUID_FEATURE_WORD,
809 .feat_names = {
810 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
811 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
812 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
813 "kvm-poll-control", "kvm-pv-sched-yield", NULL, NULL,
814 NULL, NULL, NULL, NULL,
815 NULL, NULL, NULL, NULL,
816 "kvmclock-stable-bit", NULL, NULL, NULL,
817 NULL, NULL, NULL, NULL,
818 },
819 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
820 .tcg_features = TCG_KVM_FEATURES,
821 },
822 [FEAT_KVM_HINTS] = {
823 .type = CPUID_FEATURE_WORD,
824 .feat_names = {
825 "kvm-hint-dedicated", NULL, NULL, NULL,
826 NULL, NULL, NULL, NULL,
827 NULL, NULL, NULL, NULL,
828 NULL, NULL, NULL, NULL,
829 NULL, NULL, NULL, NULL,
830 NULL, NULL, NULL, NULL,
831 NULL, NULL, NULL, NULL,
832 NULL, NULL, NULL, NULL,
833 },
834 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
835 .tcg_features = TCG_KVM_FEATURES,
836 /*
837 * KVM hints aren't auto-enabled by -cpu host; they need to be
838 * explicitly enabled on the command line.
839 */
840 .no_autoenable_flags = ~0U,
841 },
842 /*
843 * .feat_names are commented out for Hyper-V enlightenments because we
844 * don't want to have two different ways of enabling them on the QEMU
845 * command line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...)
846 * require enabling several feature bits simultaneously, and exposing
847 * these bits individually may just confuse guests.
848 */
849 [FEAT_HYPERV_EAX] = {
850 .type = CPUID_FEATURE_WORD,
851 .feat_names = {
852 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
853 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
854 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
855 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
856 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
857 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
858 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
859 NULL, NULL,
860 NULL, NULL, NULL, NULL,
861 NULL, NULL, NULL, NULL,
862 NULL, NULL, NULL, NULL,
863 NULL, NULL, NULL, NULL,
864 },
865 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
866 },
867 [FEAT_HYPERV_EBX] = {
868 .type = CPUID_FEATURE_WORD,
869 .feat_names = {
870 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
871 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
872 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
873 NULL /* hv_create_port */, NULL /* hv_connect_port */,
874 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
875 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
876 NULL, NULL,
877 NULL, NULL, NULL, NULL,
878 NULL, NULL, NULL, NULL,
879 NULL, NULL, NULL, NULL,
880 NULL, NULL, NULL, NULL,
881 },
882 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
883 },
884 [FEAT_HYPERV_EDX] = {
885 .type = CPUID_FEATURE_WORD,
886 .feat_names = {
887 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
888 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
889 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
890 NULL, NULL,
891 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
892 NULL, NULL, NULL, NULL,
893 NULL, NULL, NULL, NULL,
894 NULL, NULL, NULL, NULL,
895 NULL, NULL, NULL, NULL,
896 NULL, NULL, NULL, NULL,
897 },
898 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
899 },
900 [FEAT_HV_RECOMM_EAX] = {
901 .type = CPUID_FEATURE_WORD,
902 .feat_names = {
903 NULL /* hv_recommend_pv_as_switch */,
904 NULL /* hv_recommend_pv_tlbflush_local */,
905 NULL /* hv_recommend_pv_tlbflush_remote */,
906 NULL /* hv_recommend_msr_apic_access */,
907 NULL /* hv_recommend_msr_reset */,
908 NULL /* hv_recommend_relaxed_timing */,
909 NULL /* hv_recommend_dma_remapping */,
910 NULL /* hv_recommend_int_remapping */,
911 NULL /* hv_recommend_x2apic_msrs */,
912 NULL /* hv_recommend_autoeoi_deprecation */,
913 NULL /* hv_recommend_pv_ipi */,
914 NULL /* hv_recommend_ex_hypercalls */,
915 NULL /* hv_hypervisor_is_nested */,
916 NULL /* hv_recommend_int_mbec */,
917 NULL /* hv_recommend_evmcs */,
918 NULL,
919 NULL, NULL, NULL, NULL,
920 NULL, NULL, NULL, NULL,
921 NULL, NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 },
924 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
925 },
926 [FEAT_HV_NESTED_EAX] = {
927 .type = CPUID_FEATURE_WORD,
928 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
929 },
930 [FEAT_SVM] = {
931 .type = CPUID_FEATURE_WORD,
932 .feat_names = {
933 "npt", "lbrv", "svm-lock", "nrip-save",
934 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
935 NULL, NULL, "pause-filter", NULL,
936 "pfthreshold", NULL, NULL, NULL,
937 NULL, NULL, NULL, NULL,
938 NULL, NULL, NULL, NULL,
939 NULL, NULL, NULL, NULL,
940 NULL, NULL, NULL, NULL,
941 },
942 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
943 .tcg_features = TCG_SVM_FEATURES,
944 },
945 [FEAT_7_0_EBX] = {
946 .type = CPUID_FEATURE_WORD,
947 .feat_names = {
948 "fsgsbase", "tsc-adjust", NULL, "bmi1",
949 "hle", "avx2", NULL, "smep",
950 "bmi2", "erms", "invpcid", "rtm",
951 NULL, NULL, "mpx", NULL,
952 "avx512f", "avx512dq", "rdseed", "adx",
953 "smap", "avx512ifma", "pcommit", "clflushopt",
954 "clwb", "intel-pt", "avx512pf", "avx512er",
955 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
956 },
957 .cpuid = {
958 .eax = 7,
959 .needs_ecx = true, .ecx = 0,
960 .reg = R_EBX,
961 },
962 .tcg_features = TCG_7_0_EBX_FEATURES,
963 },
964 [FEAT_7_0_ECX] = {
965 .type = CPUID_FEATURE_WORD,
966 .feat_names = {
967 NULL, "avx512vbmi", "umip", "pku",
968 NULL /* ospke */, "waitpkg", "avx512vbmi2", NULL,
969 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
970 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
971 "la57", NULL, NULL, NULL,
972 NULL, NULL, "rdpid", NULL,
973 NULL, "cldemote", NULL, "movdiri",
974 "movdir64b", NULL, NULL, NULL,
975 },
976 .cpuid = {
977 .eax = 7,
978 .needs_ecx = true, .ecx = 0,
979 .reg = R_ECX,
980 },
981 .tcg_features = TCG_7_0_ECX_FEATURES,
982 },
983 [FEAT_7_0_EDX] = {
984 .type = CPUID_FEATURE_WORD,
985 .feat_names = {
986 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
987 NULL, NULL, NULL, NULL,
988 "avx512-vp2intersect", NULL, "md-clear", NULL,
989 NULL, NULL, NULL, NULL,
990 NULL, NULL, NULL /* pconfig */, NULL,
991 NULL, NULL, NULL, NULL,
992 NULL, NULL, "spec-ctrl", "stibp",
993 NULL, "arch-capabilities", "core-capability", "ssbd",
994 },
995 .cpuid = {
996 .eax = 7,
997 .needs_ecx = true, .ecx = 0,
998 .reg = R_EDX,
999 },
1000 .tcg_features = TCG_7_0_EDX_FEATURES,
1001 },
1002 [FEAT_7_1_EAX] = {
1003 .type = CPUID_FEATURE_WORD,
1004 .feat_names = {
1005 NULL, NULL, NULL, NULL,
1006 NULL, "avx512-bf16", NULL, NULL,
1007 NULL, NULL, NULL, NULL,
1008 NULL, NULL, NULL, NULL,
1009 NULL, NULL, NULL, NULL,
1010 NULL, NULL, NULL, NULL,
1011 NULL, NULL, NULL, NULL,
1012 NULL, NULL, NULL, NULL,
1013 },
1014 .cpuid = {
1015 .eax = 7,
1016 .needs_ecx = true, .ecx = 1,
1017 .reg = R_EAX,
1018 },
1019 .tcg_features = TCG_7_1_EAX_FEATURES,
1020 },
1021 [FEAT_8000_0007_EDX] = {
1022 .type = CPUID_FEATURE_WORD,
1023 .feat_names = {
1024 NULL, NULL, NULL, NULL,
1025 NULL, NULL, NULL, NULL,
1026 "invtsc", NULL, NULL, NULL,
1027 NULL, NULL, NULL, NULL,
1028 NULL, NULL, NULL, NULL,
1029 NULL, NULL, NULL, NULL,
1030 NULL, NULL, NULL, NULL,
1031 NULL, NULL, NULL, NULL,
1032 },
1033 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1034 .tcg_features = TCG_APM_FEATURES,
1035 .unmigratable_flags = CPUID_APM_INVTSC,
1036 },
1037 [FEAT_8000_0008_EBX] = {
1038 .type = CPUID_FEATURE_WORD,
1039 .feat_names = {
1040 "clzero", NULL, "xsaveerptr", NULL,
1041 NULL, NULL, NULL, NULL,
1042 NULL, "wbnoinvd", NULL, NULL,
1043 "ibpb", NULL, NULL, "amd-stibp",
1044 NULL, NULL, NULL, NULL,
1045 NULL, NULL, NULL, NULL,
1046 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1047 NULL, NULL, NULL, NULL,
1048 },
1049 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1050 .tcg_features = 0,
1051 .unmigratable_flags = 0,
1052 },
1053 [FEAT_XSAVE] = {
1054 .type = CPUID_FEATURE_WORD,
1055 .feat_names = {
1056 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1057 NULL, NULL, NULL, NULL,
1058 NULL, NULL, NULL, NULL,
1059 NULL, NULL, NULL, NULL,
1060 NULL, NULL, NULL, NULL,
1061 NULL, NULL, NULL, NULL,
1062 NULL, NULL, NULL, NULL,
1063 NULL, NULL, NULL, NULL,
1064 },
1065 .cpuid = {
1066 .eax = 0xd,
1067 .needs_ecx = true, .ecx = 1,
1068 .reg = R_EAX,
1069 },
1070 .tcg_features = TCG_XSAVE_FEATURES,
1071 },
1072 [FEAT_6_EAX] = {
1073 .type = CPUID_FEATURE_WORD,
1074 .feat_names = {
1075 NULL, NULL, "arat", NULL,
1076 NULL, NULL, NULL, NULL,
1077 NULL, NULL, NULL, NULL,
1078 NULL, NULL, NULL, NULL,
1079 NULL, NULL, NULL, NULL,
1080 NULL, NULL, NULL, NULL,
1081 NULL, NULL, NULL, NULL,
1082 NULL, NULL, NULL, NULL,
1083 },
1084 .cpuid = { .eax = 6, .reg = R_EAX, },
1085 .tcg_features = TCG_6_EAX_FEATURES,
1086 },
1087 [FEAT_XSAVE_COMP_LO] = {
1088 .type = CPUID_FEATURE_WORD,
1089 .cpuid = {
1090 .eax = 0xD,
1091 .needs_ecx = true, .ecx = 0,
1092 .reg = R_EAX,
1093 },
1094 .tcg_features = ~0U,
1095 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1096 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1097 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1098 XSTATE_PKRU_MASK,
1099 },
1100 [FEAT_XSAVE_COMP_HI] = {
1101 .type = CPUID_FEATURE_WORD,
1102 .cpuid = {
1103 .eax = 0xD,
1104 .needs_ecx = true, .ecx = 0,
1105 .reg = R_EDX,
1106 },
1107 .tcg_features = ~0U,
1108 },
1109 /* Below are MSR-exposed features */
1110 [FEAT_ARCH_CAPABILITIES] = {
1111 .type = MSR_FEATURE_WORD,
1112 .feat_names = {
1113 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1114 "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl",
1115 "taa-no", NULL, NULL, NULL,
1116 NULL, NULL, NULL, NULL,
1117 NULL, NULL, NULL, NULL,
1118 NULL, NULL, NULL, NULL,
1119 NULL, NULL, NULL, NULL,
1120 NULL, NULL, NULL, NULL,
1121 },
1122 .msr = {
1123 .index = MSR_IA32_ARCH_CAPABILITIES,
1124 },
1125 },
1126 [FEAT_CORE_CAPABILITY] = {
1127 .type = MSR_FEATURE_WORD,
1128 .feat_names = {
1129 NULL, NULL, NULL, NULL,
1130 NULL, "split-lock-detect", NULL, NULL,
1131 NULL, NULL, NULL, NULL,
1132 NULL, NULL, NULL, NULL,
1133 NULL, NULL, NULL, NULL,
1134 NULL, NULL, NULL, NULL,
1135 NULL, NULL, NULL, NULL,
1136 NULL, NULL, NULL, NULL,
1137 },
1138 .msr = {
1139 .index = MSR_IA32_CORE_CAPABILITY,
1140 },
1141 },
1142 [FEAT_PERF_CAPABILITIES] = {
1143 .type = MSR_FEATURE_WORD,
1144 .feat_names = {
1145 NULL, NULL, NULL, NULL,
1146 NULL, NULL, NULL, NULL,
1147 NULL, NULL, NULL, NULL,
1148 NULL, "full-width-write", NULL, NULL,
1149 NULL, NULL, NULL, NULL,
1150 NULL, NULL, NULL, NULL,
1151 NULL, NULL, NULL, NULL,
1152 NULL, NULL, NULL, NULL,
1153 },
1154 .msr = {
1155 .index = MSR_IA32_PERF_CAPABILITIES,
1156 },
1157 },
1158
1159 [FEAT_VMX_PROCBASED_CTLS] = {
1160 .type = MSR_FEATURE_WORD,
1161 .feat_names = {
1162 NULL, NULL, "vmx-vintr-pending", "vmx-tsc-offset",
1163 NULL, NULL, NULL, "vmx-hlt-exit",
1164 NULL, "vmx-invlpg-exit", "vmx-mwait-exit", "vmx-rdpmc-exit",
1165 "vmx-rdtsc-exit", NULL, NULL, "vmx-cr3-load-noexit",
1166 "vmx-cr3-store-noexit", NULL, NULL, "vmx-cr8-load-exit",
1167 "vmx-cr8-store-exit", "vmx-flexpriority", "vmx-vnmi-pending", "vmx-movdr-exit",
1168 "vmx-io-exit", "vmx-io-bitmap", NULL, "vmx-mtf",
1169 "vmx-msr-bitmap", "vmx-monitor-exit", "vmx-pause-exit", "vmx-secondary-ctls",
1170 },
1171 .msr = {
1172 .index = MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
1173 }
1174 },
1175
1176 [FEAT_VMX_SECONDARY_CTLS] = {
1177 .type = MSR_FEATURE_WORD,
1178 .feat_names = {
1179 "vmx-apicv-xapic", "vmx-ept", "vmx-desc-exit", "vmx-rdtscp-exit",
1180 "vmx-apicv-x2apic", "vmx-vpid", "vmx-wbinvd-exit", "vmx-unrestricted-guest",
1181 "vmx-apicv-register", "vmx-apicv-vid", "vmx-ple", "vmx-rdrand-exit",
1182 "vmx-invpcid-exit", "vmx-vmfunc", "vmx-shadow-vmcs", "vmx-encls-exit",
1183 "vmx-rdseed-exit", "vmx-pml", NULL, NULL,
1184 "vmx-xsaves", NULL, NULL, NULL,
1185 NULL, NULL, NULL, NULL,
1186 NULL, NULL, NULL, NULL,
1187 },
1188 .msr = {
1189 .index = MSR_IA32_VMX_PROCBASED_CTLS2,
1190 }
1191 },
1192
1193 [FEAT_VMX_PINBASED_CTLS] = {
1194 .type = MSR_FEATURE_WORD,
1195 .feat_names = {
1196 "vmx-intr-exit", NULL, NULL, "vmx-nmi-exit",
1197 NULL, "vmx-vnmi", "vmx-preemption-timer", "vmx-posted-intr",
1198 NULL, NULL, NULL, NULL,
1199 NULL, NULL, NULL, NULL,
1200 NULL, NULL, NULL, NULL,
1201 NULL, NULL, NULL, NULL,
1202 NULL, NULL, NULL, NULL,
1203 NULL, NULL, NULL, NULL,
1204 },
1205 .msr = {
1206 .index = MSR_IA32_VMX_TRUE_PINBASED_CTLS,
1207 }
1208 },
1209
1210 [FEAT_VMX_EXIT_CTLS] = {
1211 .type = MSR_FEATURE_WORD,
1212 /*
1213 * VMX_VM_EXIT_HOST_ADDR_SPACE_SIZE is copied from
1214 * the LM CPUID bit.
1215 */
1216 .feat_names = {
1217 NULL, NULL, "vmx-exit-nosave-debugctl", NULL,
1218 NULL, NULL, NULL, NULL,
1219 NULL, NULL /* vmx-exit-host-addr-space-size */, NULL, NULL,
1220 "vmx-exit-load-perf-global-ctrl", NULL, NULL, "vmx-exit-ack-intr",
1221 NULL, NULL, "vmx-exit-save-pat", "vmx-exit-load-pat",
1222 "vmx-exit-save-efer", "vmx-exit-load-efer",
1223 "vmx-exit-save-preemption-timer", "vmx-exit-clear-bndcfgs",
1224 NULL, "vmx-exit-clear-rtit-ctl", NULL, NULL,
1225 NULL, NULL, NULL, NULL,
1226 },
1227 .msr = {
1228 .index = MSR_IA32_VMX_TRUE_EXIT_CTLS,
1229 }
1230 },
1231
1232 [FEAT_VMX_ENTRY_CTLS] = {
1233 .type = MSR_FEATURE_WORD,
1234 .feat_names = {
1235 NULL, NULL, "vmx-entry-noload-debugctl", NULL,
1236 NULL, NULL, NULL, NULL,
1237 NULL, "vmx-entry-ia32e-mode", NULL, NULL,
1238 NULL, "vmx-entry-load-perf-global-ctrl", "vmx-entry-load-pat", "vmx-entry-load-efer",
1239 "vmx-entry-load-bndcfgs", NULL, "vmx-entry-load-rtit-ctl", NULL,
1240 NULL, NULL, NULL, NULL,
1241 NULL, NULL, NULL, NULL,
1242 NULL, NULL, NULL, NULL,
1243 },
1244 .msr = {
1245 .index = MSR_IA32_VMX_TRUE_ENTRY_CTLS,
1246 }
1247 },
1248
1249 [FEAT_VMX_MISC] = {
1250 .type = MSR_FEATURE_WORD,
1251 .feat_names = {
1252 NULL, NULL, NULL, NULL,
1253 NULL, "vmx-store-lma", "vmx-activity-hlt", "vmx-activity-shutdown",
1254 "vmx-activity-wait-sipi", NULL, NULL, NULL,
1255 NULL, NULL, NULL, NULL,
1256 NULL, NULL, NULL, NULL,
1257 NULL, NULL, NULL, NULL,
1258 NULL, NULL, NULL, NULL,
1259 NULL, "vmx-vmwrite-vmexit-fields", "vmx-zero-len-inject", NULL,
1260 },
1261 .msr = {
1262 .index = MSR_IA32_VMX_MISC,
1263 }
1264 },
1265
1266 [FEAT_VMX_EPT_VPID_CAPS] = {
1267 .type = MSR_FEATURE_WORD,
1268 .feat_names = {
1269 "vmx-ept-execonly", NULL, NULL, NULL,
1270 NULL, NULL, "vmx-page-walk-4", "vmx-page-walk-5",
1271 NULL, NULL, NULL, NULL,
1272 NULL, NULL, NULL, NULL,
1273 "vmx-ept-2mb", "vmx-ept-1gb", NULL, NULL,
1274 "vmx-invept", "vmx-eptad", "vmx-ept-advanced-exitinfo", NULL,
1275 NULL, "vmx-invept-single-context", "vmx-invept-all-context", NULL,
1276 NULL, NULL, NULL, NULL,
1277 "vmx-invvpid", NULL, NULL, NULL,
1278 NULL, NULL, NULL, NULL,
1279 "vmx-invvpid-single-addr", "vmx-invept-single-context",
1280 "vmx-invvpid-all-context", "vmx-invept-single-context-noglobals",
1281 NULL, NULL, NULL, NULL,
1282 NULL, NULL, NULL, NULL,
1283 NULL, NULL, NULL, NULL,
1284 NULL, NULL, NULL, NULL,
1285 NULL, NULL, NULL, NULL,
1286 },
1287 .msr = {
1288 .index = MSR_IA32_VMX_EPT_VPID_CAP,
1289 }
1290 },
1291
1292 [FEAT_VMX_BASIC] = {
1293 .type = MSR_FEATURE_WORD,
1294 .feat_names = {
1295 [54] = "vmx-ins-outs",
1296 [55] = "vmx-true-ctls",
1297 },
1298 .msr = {
1299 .index = MSR_IA32_VMX_BASIC,
1300 },
1301 /* Just to be safe - we don't support setting the MSEG version field. */
1302 .no_autoenable_flags = MSR_VMX_BASIC_DUAL_MONITOR,
1303 },
1304
1305 [FEAT_VMX_VMFUNC] = {
1306 .type = MSR_FEATURE_WORD,
1307 .feat_names = {
1308 [0] = "vmx-eptp-switching",
1309 },
1310 .msr = {
1311 .index = MSR_IA32_VMX_VMFUNC,
1312 }
1313 },
1314
1315 };
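/*
 * [Illustrative sketch, not part of the original cpu.c]
 * How a CPUID-type entry in feature_word_info[] could be turned into the
 * host's value of that feature word. host_cpuid() is declared in cpu.h
 * and defined later in this file; the real QEMU lookup goes through the
 * accelerator (e.g. KVM) rather than raw host CPUID, and MSR-type words
 * need an MSR read, which is skipped here. The helper name is
 * hypothetical.
 */
static uint64_t example_host_feature_word(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t regs[8] = { 0 };

    if (wi->type != CPUID_FEATURE_WORD) {
        return 0; /* MSR-type feature words are not handled here */
    }
    /* Only the first four slots (R_EAX..R_EBX) are filled in */
    host_cpuid(wi->cpuid.eax, wi->cpuid.needs_ecx ? wi->cpuid.ecx : 0,
               &regs[R_EAX], &regs[R_EBX], &regs[R_ECX], &regs[R_EDX]);
    return regs[wi->cpuid.reg];
}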
1316
1317 typedef struct FeatureMask {
1318 FeatureWord index;
1319 uint64_t mask;
1320 } FeatureMask;
1321
1322 typedef struct FeatureDep {
1323 FeatureMask from, to;
1324 } FeatureDep;
1325
1326 static FeatureDep feature_dependencies[] = {
1327 {
1328 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_ARCH_CAPABILITIES },
1329 .to = { FEAT_ARCH_CAPABILITIES, ~0ull },
1330 },
1331 {
1332 .from = { FEAT_7_0_EDX, CPUID_7_0_EDX_CORE_CAPABILITY },
1333 .to = { FEAT_CORE_CAPABILITY, ~0ull },
1334 },
1335 {
1336 .from = { FEAT_1_ECX, CPUID_EXT_PDCM },
1337 .to = { FEAT_PERF_CAPABILITIES, ~0ull },
1338 },
1339 {
1340 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1341 .to = { FEAT_VMX_PROCBASED_CTLS, ~0ull },
1342 },
1343 {
1344 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1345 .to = { FEAT_VMX_PINBASED_CTLS, ~0ull },
1346 },
1347 {
1348 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1349 .to = { FEAT_VMX_EXIT_CTLS, ~0ull },
1350 },
1351 {
1352 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1353 .to = { FEAT_VMX_ENTRY_CTLS, ~0ull },
1354 },
1355 {
1356 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1357 .to = { FEAT_VMX_MISC, ~0ull },
1358 },
1359 {
1360 .from = { FEAT_1_ECX, CPUID_EXT_VMX },
1361 .to = { FEAT_VMX_BASIC, ~0ull },
1362 },
1363 {
1364 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_LM },
1365 .to = { FEAT_VMX_ENTRY_CTLS, VMX_VM_ENTRY_IA32E_MODE },
1366 },
1367 {
1368 .from = { FEAT_VMX_PROCBASED_CTLS, VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS },
1369 .to = { FEAT_VMX_SECONDARY_CTLS, ~0ull },
1370 },
1371 {
1372 .from = { FEAT_XSAVE, CPUID_XSAVE_XSAVES },
1373 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_XSAVES },
1374 },
1375 {
1376 .from = { FEAT_1_ECX, CPUID_EXT_RDRAND },
1377 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDRAND_EXITING },
1378 },
1379 {
1380 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_INVPCID },
1381 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_INVPCID },
1382 },
1383 {
1384 .from = { FEAT_7_0_EBX, CPUID_7_0_EBX_RDSEED },
1385 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDSEED_EXITING },
1386 },
1387 {
1388 .from = { FEAT_8000_0001_EDX, CPUID_EXT2_RDTSCP },
1389 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_RDTSCP },
1390 },
1391 {
1392 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
1393 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull },
1394 },
1395 {
1396 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_EPT },
1397 .to = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST },
1398 },
1399 {
1400 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VPID },
1401 .to = { FEAT_VMX_EPT_VPID_CAPS, 0xffffffffull << 32 },
1402 },
1403 {
1404 .from = { FEAT_VMX_SECONDARY_CTLS, VMX_SECONDARY_EXEC_ENABLE_VMFUNC },
1405 .to = { FEAT_VMX_VMFUNC, ~0ull },
1406 },
1407 };
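/*
 * [Illustrative sketch, not part of the original cpu.c]
 * The intended semantics of feature_dependencies[]: if the required
 * 'from' bits are not all present, the dependent 'to' bits must be
 * cleared. A minimal enforcement loop could look like this; the real
 * code in QEMU also reports which features it had to drop. The helper
 * name is hypothetical.
 */
static void example_apply_feature_deps(FeatureWordArray features)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
        FeatureDep *d = &feature_dependencies[i];

        if ((features[d->from.index] & d->from.mask) != d->from.mask) {
            features[d->to.index] &= ~d->to.mask;
        }
    }
}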
1408
1409 typedef struct X86RegisterInfo32 {
1410 /* Name of register */
1411 const char *name;
1412 /* QAPI enum value register */
1413 X86CPURegister32 qapi_enum;
1414 } X86RegisterInfo32;
1415
1416 #define REGISTER(reg) \
1417 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1418 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
1419 REGISTER(EAX),
1420 REGISTER(ECX),
1421 REGISTER(EDX),
1422 REGISTER(EBX),
1423 REGISTER(ESP),
1424 REGISTER(EBP),
1425 REGISTER(ESI),
1426 REGISTER(EDI),
1427 };
1428 #undef REGISTER
1429
1430 typedef struct ExtSaveArea {
1431 uint32_t feature, bits;
1432 uint32_t offset, size;
1433 } ExtSaveArea;
1434
1435 static const ExtSaveArea x86_ext_save_areas[] = {
1436 [XSTATE_FP_BIT] = {
1437 /* x87 FP state component is always enabled if XSAVE is supported */
1438 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1439 /* x87 state is in the legacy region of the XSAVE area */
1440 .offset = 0,
1441 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1442 },
1443 [XSTATE_SSE_BIT] = {
1444 /* SSE state component is always enabled if XSAVE is supported */
1445 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1446 /* SSE state is in the legacy region of the XSAVE area */
1447 .offset = 0,
1448 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1449 },
1450 [XSTATE_YMM_BIT] =
1451 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1452 .offset = offsetof(X86XSaveArea, avx_state),
1453 .size = sizeof(XSaveAVX) },
1454 [XSTATE_BNDREGS_BIT] =
1455 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1456 .offset = offsetof(X86XSaveArea, bndreg_state),
1457 .size = sizeof(XSaveBNDREG) },
1458 [XSTATE_BNDCSR_BIT] =
1459 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1460 .offset = offsetof(X86XSaveArea, bndcsr_state),
1461 .size = sizeof(XSaveBNDCSR) },
1462 [XSTATE_OPMASK_BIT] =
1463 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1464 .offset = offsetof(X86XSaveArea, opmask_state),
1465 .size = sizeof(XSaveOpmask) },
1466 [XSTATE_ZMM_Hi256_BIT] =
1467 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1468 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1469 .size = sizeof(XSaveZMM_Hi256) },
1470 [XSTATE_Hi16_ZMM_BIT] =
1471 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1472 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1473 .size = sizeof(XSaveHi16_ZMM) },
1474 [XSTATE_PKRU_BIT] =
1475 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1476 .offset = offsetof(X86XSaveArea, pkru_state),
1477 .size = sizeof(XSavePKRU) },
1478 };
1479
1480 static uint32_t xsave_area_size(uint64_t mask)
1481 {
1482 int i;
1483 uint64_t ret = 0;
1484
1485 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1486 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1487 if ((mask >> i) & 1) {
1488 ret = MAX(ret, esa->offset + esa->size);
1489 }
1490 }
1491 return ret;
1492 }
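/*
 * [Illustrative worked example, not part of the original cpu.c]
 * With mask = XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK, the
 * x87/SSE components cover the 512-byte legacy region plus the 64-byte
 * XSAVE header (576 bytes), and the AVX component extends the area to
 * offsetof(X86XSaveArea, avx_state) + sizeof(XSaveAVX) = 576 + 256 =
 * 832 bytes, the standard non-compacted XSAVE layout.
 */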
1493
1494 static inline bool accel_uses_host_cpuid(void)
1495 {
1496 return kvm_enabled() || hvf_enabled();
1497 }
1498
1499 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1500 {
1501 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1502 cpu->env.features[FEAT_XSAVE_COMP_LO];
1503 }
1504
1505 const char *get_register_name_32(unsigned int reg)
1506 {
1507 if (reg >= CPU_NB_REGS32) {
1508 return NULL;
1509 }
1510 return x86_reg_info_32[reg].name;
1511 }
1512
1513 /*
1514 * Returns the set of feature flags that are supported and migratable by
1515 * QEMU, for a given FeatureWord.
1516 */
1517 static uint64_t x86_cpu_get_migratable_flags(FeatureWord w)
1518 {
1519 FeatureWordInfo *wi = &feature_word_info[w];
1520 uint64_t r = 0;
1521 int i;
1522
1523 for (i = 0; i < 64; i++) {
1524 uint64_t f = 1ULL << i;
1525
1526 /* If the feature name is known, it is implicitly considered migratable,
1527 * unless it is explicitly set in unmigratable_flags */
1528 if ((wi->migratable_flags & f) ||
1529 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1530 r |= f;
1531 }
1532 }
1533 return r;
1534 }
1535
1536 void host_cpuid(uint32_t function, uint32_t count,
1537 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1538 {
1539 uint32_t vec[4];
1540
1541 #ifdef __x86_64__
1542 asm volatile("cpuid"
1543 : "=a"(vec[0]), "=b"(vec[1]),
1544 "=c"(vec[2]), "=d"(vec[3])
1545 : "0"(function), "c"(count) : "cc");
1546 #elif defined(__i386__)
1547 asm volatile("pusha \n\t"
1548 "cpuid \n\t"
1549 "mov %%eax, 0(%2) \n\t"
1550 "mov %%ebx, 4(%2) \n\t"
1551 "mov %%ecx, 8(%2) \n\t"
1552 "mov %%edx, 12(%2) \n\t"
1553 "popa"
1554 : : "a"(function), "c"(count), "S"(vec)
1555 : "memory", "cc");
1556 #else
1557 abort();
1558 #endif
1559
1560 if (eax)
1561 *eax = vec[0];
1562 if (ebx)
1563 *ebx = vec[1];
1564 if (ecx)
1565 *ecx = vec[2];
1566 if (edx)
1567 *edx = vec[3];
1568 }
1569
1570 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1571 {
1572 uint32_t eax, ebx, ecx, edx;
1573
1574 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1575 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1576
1577 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1578 if (family) {
1579 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1580 }
1581 if (model) {
1582 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1583 }
1584 if (stepping) {
1585 *stepping = eax & 0x0F;
1586 }
1587 }
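/*
 * [Illustrative worked example, not part of the original cpu.c]
 * A host reporting CPUID.1:EAX = 0x000906EA decodes, with the extended
 * family/model rules above, to family (6 + 0) = 6, model
 * (0xE | (0x9 << 4)) = 0x9E and stepping 0xA.
 */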
1588
1589 /* CPU class name definitions: */
1590
1591 /* Return type name for a given CPU model name
1592 * Caller is responsible for freeing the returned string.
1593 */
1594 static char *x86_cpu_type_name(const char *model_name)
1595 {
1596 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
1597 }
1598
1599 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1600 {
1601 g_autofree char *typename = x86_cpu_type_name(cpu_model);
1602 return object_class_by_name(typename);
1603 }
1604
1605 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1606 {
1607 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1608 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1609 return g_strndup(class_name,
1610 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1611 }
1612
1613 typedef struct PropValue {
1614 const char *prop, *value;
1615 } PropValue;
1616
1617 typedef struct X86CPUVersionDefinition {
1618 X86CPUVersion version;
1619 const char *alias;
1620 const char *note;
1621 PropValue *props;
1622 } X86CPUVersionDefinition;
1623
1624 /* Base definition for a CPU model */
1625 typedef struct X86CPUDefinition {
1626 const char *name;
1627 uint32_t level;
1628 uint32_t xlevel;
1629 /* vendor is zero-terminated, 12 character ASCII string */
1630 char vendor[CPUID_VENDOR_SZ + 1];
1631 int family;
1632 int model;
1633 int stepping;
1634 FeatureWordArray features;
1635 const char *model_id;
1636 CPUCaches *cache_info;
1637
1638 /* Use AMD EPYC encoding for apic id */
1639 bool use_epyc_apic_id_encoding;
1640
1641 /*
1642 * Definitions for alternative versions of CPU model.
1643 * List is terminated by item with version == 0.
1644 * If NULL, version 1 will be registered automatically.
1645 */
1646 const X86CPUVersionDefinition *versions;
1647 } X86CPUDefinition;
1648
1649 /* Reference to a specific CPU model version */
1650 struct X86CPUModel {
1651 /* Base CPU definition */
1652 X86CPUDefinition *cpudef;
1653 /* CPU model version */
1654 X86CPUVersion version;
1655 const char *note;
1656 /*
1657 * If true, this is an alias CPU model.
1658 * This matters only for "-cpu help" and query-cpu-definitions
1659 */
1660 bool is_alias;
1661 };
1662
1663 /* Get full model name for CPU version */
1664 static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef,
1665 X86CPUVersion version)
1666 {
1667 assert(version > 0);
1668 return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
1669 }
1670
1671 static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def)
1672 {
1673 /* When X86CPUDefinition::versions is NULL, we register only v1 */
1674 static const X86CPUVersionDefinition default_version_list[] = {
1675 { 1 },
1676 { /* end of list */ }
1677 };
1678
1679 return def->versions ?: default_version_list;
1680 }
1681
1682 bool cpu_x86_use_epyc_apic_id_encoding(const char *cpu_type)
1683 {
1684 X86CPUClass *xcc = X86_CPU_CLASS(object_class_by_name(cpu_type));
1685
1686 assert(xcc);
1687 if (xcc->model && xcc->model->cpudef) {
1688 return xcc->model->cpudef->use_epyc_apic_id_encoding;
1689 } else {
1690 return false;
1691 }
1692 }
1693
1694 static CPUCaches epyc_cache_info = {
1695 .l1d_cache = &(CPUCacheInfo) {
1696 .type = DATA_CACHE,
1697 .level = 1,
1698 .size = 32 * KiB,
1699 .line_size = 64,
1700 .associativity = 8,
1701 .partitions = 1,
1702 .sets = 64,
1703 .lines_per_tag = 1,
1704 .self_init = 1,
1705 .no_invd_sharing = true,
1706 },
1707 .l1i_cache = &(CPUCacheInfo) {
1708 .type = INSTRUCTION_CACHE,
1709 .level = 1,
1710 .size = 64 * KiB,
1711 .line_size = 64,
1712 .associativity = 4,
1713 .partitions = 1,
1714 .sets = 256,
1715 .lines_per_tag = 1,
1716 .self_init = 1,
1717 .no_invd_sharing = true,
1718 },
1719 .l2_cache = &(CPUCacheInfo) {
1720 .type = UNIFIED_CACHE,
1721 .level = 2,
1722 .size = 512 * KiB,
1723 .line_size = 64,
1724 .associativity = 8,
1725 .partitions = 1,
1726 .sets = 1024,
1727 .lines_per_tag = 1,
1728 },
1729 .l3_cache = &(CPUCacheInfo) {
1730 .type = UNIFIED_CACHE,
1731 .level = 3,
1732 .size = 8 * MiB,
1733 .line_size = 64,
1734 .associativity = 16,
1735 .partitions = 1,
1736 .sets = 8192,
1737 .lines_per_tag = 1,
1738 .self_init = true,
1739 .inclusive = true,
1740 .complex_indexing = true,
1741 },
1742 };
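/*
 * Note: for every level above, the geometry multiplies out to the declared
 * size, i.e. size == line_size * associativity * partitions * sets
 * (L1D: 64 * 8 * 1 * 64 = 32 KiB, L3: 64 * 16 * 1 * 8192 = 8 MiB); this is
 * the relationship the guest uses to recover the cache size from CPUID leaf
 * 0x8000001D, which reports each of these fields minus one.
 */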
1743
1744 static CPUCaches epyc_rome_cache_info = {
1745 .l1d_cache = &(CPUCacheInfo) {
1746 .type = DATA_CACHE,
1747 .level = 1,
1748 .size = 32 * KiB,
1749 .line_size = 64,
1750 .associativity = 8,
1751 .partitions = 1,
1752 .sets = 64,
1753 .lines_per_tag = 1,
1754 .self_init = 1,
1755 .no_invd_sharing = true,
1756 },
1757 .l1i_cache = &(CPUCacheInfo) {
1758 .type = INSTRUCTION_CACHE,
1759 .level = 1,
1760 .size = 32 * KiB,
1761 .line_size = 64,
1762 .associativity = 8,
1763 .partitions = 1,
1764 .sets = 64,
1765 .lines_per_tag = 1,
1766 .self_init = 1,
1767 .no_invd_sharing = true,
1768 },
1769 .l2_cache = &(CPUCacheInfo) {
1770 .type = UNIFIED_CACHE,
1771 .level = 2,
1772 .size = 512 * KiB,
1773 .line_size = 64,
1774 .associativity = 8,
1775 .partitions = 1,
1776 .sets = 1024,
1777 .lines_per_tag = 1,
1778 },
1779 .l3_cache = &(CPUCacheInfo) {
1780 .type = UNIFIED_CACHE,
1781 .level = 3,
1782 .size = 16 * MiB,
1783 .line_size = 64,
1784 .associativity = 16,
1785 .partitions = 1,
1786 .sets = 16384,
1787 .lines_per_tag = 1,
1788 .self_init = true,
1789 .inclusive = true,
1790 .complex_indexing = true,
1791 },
1792 };
1793
1794 /* The following VMX features are not supported by KVM and are left out of the
1795 * CPU definitions:
1796 *
1797 * Dual-monitor support (all processors)
1798 * Entry to SMM
1799 * Deactivate dual-monitor treatment
1800 * Number of CR3-target values
1801 * Shutdown activity state
1802 * Wait-for-SIPI activity state
1803 * PAUSE-loop exiting (Westmere and newer)
1804 * EPT-violation #VE (Broadwell and newer)
1805 * Inject event with insn length=0 (Skylake and newer)
1806 * Conceal non-root operation from PT
1807 * Conceal VM exits from PT
1808 * Conceal VM entries from PT
1809 * Enable ENCLS exiting
1810 * Mode-based execute control (XS/XU)
1811 * TSC scaling (Skylake Server and newer)
1812 * GPA translation for PT (IceLake and newer)
1813 * User wait and pause
1814 * ENCLV exiting
1815 * Load IA32_RTIT_CTL
1816 * Clear IA32_RTIT_CTL
1817 * Advanced VM-exit information for EPT violations
1818 * Sub-page write permissions
1819 * PT in VMX operation
1820 */
1821
1822 static X86CPUDefinition builtin_x86_defs[] = {
1823 {
1824 .name = "qemu64",
1825 .level = 0xd,
1826 .vendor = CPUID_VENDOR_AMD,
1827 .family = 6,
1828 .model = 6,
1829 .stepping = 3,
1830 .features[FEAT_1_EDX] =
1831 PPRO_FEATURES |
1832 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1833 CPUID_PSE36,
1834 .features[FEAT_1_ECX] =
1835 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1836 .features[FEAT_8000_0001_EDX] =
1837 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1838 .features[FEAT_8000_0001_ECX] =
1839 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1840 .xlevel = 0x8000000A,
1841 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1842 },
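/*
 * Note: each .features[...] word is simply the OR of the CPUID/MSR bit masks
 * the model advertises; once a definition is loaded into a CPU instance,
 * individual bits are tested as in this sketch:
 *
 *     if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
 *         ... long mode is being advertised ...
 *     }
 */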
1843 {
1844 .name = "phenom",
1845 .level = 5,
1846 .vendor = CPUID_VENDOR_AMD,
1847 .family = 16,
1848 .model = 2,
1849 .stepping = 3,
1850 /* Missing: CPUID_HT */
1851 .features[FEAT_1_EDX] =
1852 PPRO_FEATURES |
1853 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1854 CPUID_PSE36 | CPUID_VME,
1855 .features[FEAT_1_ECX] =
1856 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1857 CPUID_EXT_POPCNT,
1858 .features[FEAT_8000_0001_EDX] =
1859 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1860 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1861 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1862 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1863 CPUID_EXT3_CR8LEG,
1864 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1865 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1866 .features[FEAT_8000_0001_ECX] =
1867 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1868 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1869 /* Missing: CPUID_SVM_LBRV */
1870 .features[FEAT_SVM] =
1871 CPUID_SVM_NPT,
1872 .xlevel = 0x8000001A,
1873 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1874 },
1875 {
1876 .name = "core2duo",
1877 .level = 10,
1878 .vendor = CPUID_VENDOR_INTEL,
1879 .family = 6,
1880 .model = 15,
1881 .stepping = 11,
1882 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1883 .features[FEAT_1_EDX] =
1884 PPRO_FEATURES |
1885 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1886 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1887 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1888 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1889 .features[FEAT_1_ECX] =
1890 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1891 CPUID_EXT_CX16,
1892 .features[FEAT_8000_0001_EDX] =
1893 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1894 .features[FEAT_8000_0001_ECX] =
1895 CPUID_EXT3_LAHF_LM,
1896 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
1897 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1898 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1899 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1900 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1901 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
1902 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1903 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1904 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
1905 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
1906 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
1907 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
1908 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
1909 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
1910 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
1911 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
1912 .features[FEAT_VMX_SECONDARY_CTLS] =
1913 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
1914 .xlevel = 0x80000008,
1915 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1916 },
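/*
 * Note: the FEAT_VMX_* words used above (and in the models below) mirror the
 * "allowed-1" bits of the corresponding IA32_VMX_* capability MSRs, i.e.
 * which VMX controls a nested hypervisor may set; they only take effect when
 * the "vmx" CPUID feature itself is enabled.
 */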
1917 {
1918 .name = "kvm64",
1919 .level = 0xd,
1920 .vendor = CPUID_VENDOR_INTEL,
1921 .family = 15,
1922 .model = 6,
1923 .stepping = 1,
1924 /* Missing: CPUID_HT */
1925 .features[FEAT_1_EDX] =
1926 PPRO_FEATURES | CPUID_VME |
1927 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1928 CPUID_PSE36,
1929 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1930 .features[FEAT_1_ECX] =
1931 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1932 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1933 .features[FEAT_8000_0001_EDX] =
1934 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1935 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1936 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1937 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1938 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1939 .features[FEAT_8000_0001_ECX] =
1940 0,
1941 /* VMX features from Cedar Mill/Prescott */
1942 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1943 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1944 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1945 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1946 VMX_PIN_BASED_NMI_EXITING,
1947 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1948 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1949 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
1950 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
1951 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
1952 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
1953 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
1954 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING,
1955 .xlevel = 0x80000008,
1956 .model_id = "Common KVM processor"
1957 },
1958 {
1959 .name = "qemu32",
1960 .level = 4,
1961 .vendor = CPUID_VENDOR_INTEL,
1962 .family = 6,
1963 .model = 6,
1964 .stepping = 3,
1965 .features[FEAT_1_EDX] =
1966 PPRO_FEATURES,
1967 .features[FEAT_1_ECX] =
1968 CPUID_EXT_SSE3,
1969 .xlevel = 0x80000004,
1970 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1971 },
1972 {
1973 .name = "kvm32",
1974 .level = 5,
1975 .vendor = CPUID_VENDOR_INTEL,
1976 .family = 15,
1977 .model = 6,
1978 .stepping = 1,
1979 .features[FEAT_1_EDX] =
1980 PPRO_FEATURES | CPUID_VME |
1981 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1982 .features[FEAT_1_ECX] =
1983 CPUID_EXT_SSE3,
1984 .features[FEAT_8000_0001_ECX] =
1985 0,
1986 /* VMX features from Yonah */
1987 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
1988 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
1989 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
1990 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
1991 VMX_PIN_BASED_NMI_EXITING,
1992 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
1993 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
1994 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
1995 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
1996 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
1997 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
1998 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
1999 .xlevel = 0x80000008,
2000 .model_id = "Common 32-bit KVM processor"
2001 },
2002 {
2003 .name = "coreduo",
2004 .level = 10,
2005 .vendor = CPUID_VENDOR_INTEL,
2006 .family = 6,
2007 .model = 14,
2008 .stepping = 8,
2009 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2010 .features[FEAT_1_EDX] =
2011 PPRO_FEATURES | CPUID_VME |
2012 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
2013 CPUID_SS,
2014 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
2015 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
2016 .features[FEAT_1_ECX] =
2017 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
2018 .features[FEAT_8000_0001_EDX] =
2019 CPUID_EXT2_NX,
2020 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
2021 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
2022 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2023 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2024 VMX_PIN_BASED_NMI_EXITING,
2025 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2026 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2027 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2028 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2029 VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING |
2030 VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING |
2031 VMX_CPU_BASED_PAUSE_EXITING | VMX_CPU_BASED_USE_MSR_BITMAPS,
2032 .xlevel = 0x80000008,
2033 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
2034 },
2035 {
2036 .name = "486",
2037 .level = 1,
2038 .vendor = CPUID_VENDOR_INTEL,
2039 .family = 4,
2040 .model = 8,
2041 .stepping = 0,
2042 .features[FEAT_1_EDX] =
2043 I486_FEATURES,
2044 .xlevel = 0,
2045 .model_id = "",
2046 },
2047 {
2048 .name = "pentium",
2049 .level = 1,
2050 .vendor = CPUID_VENDOR_INTEL,
2051 .family = 5,
2052 .model = 4,
2053 .stepping = 3,
2054 .features[FEAT_1_EDX] =
2055 PENTIUM_FEATURES,
2056 .xlevel = 0,
2057 .model_id = "",
2058 },
2059 {
2060 .name = "pentium2",
2061 .level = 2,
2062 .vendor = CPUID_VENDOR_INTEL,
2063 .family = 6,
2064 .model = 5,
2065 .stepping = 2,
2066 .features[FEAT_1_EDX] =
2067 PENTIUM2_FEATURES,
2068 .xlevel = 0,
2069 .model_id = "",
2070 },
2071 {
2072 .name = "pentium3",
2073 .level = 3,
2074 .vendor = CPUID_VENDOR_INTEL,
2075 .family = 6,
2076 .model = 7,
2077 .stepping = 3,
2078 .features[FEAT_1_EDX] =
2079 PENTIUM3_FEATURES,
2080 .xlevel = 0,
2081 .model_id = "",
2082 },
2083 {
2084 .name = "athlon",
2085 .level = 2,
2086 .vendor = CPUID_VENDOR_AMD,
2087 .family = 6,
2088 .model = 2,
2089 .stepping = 3,
2090 .features[FEAT_1_EDX] =
2091 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
2092 CPUID_MCA,
2093 .features[FEAT_8000_0001_EDX] =
2094 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
2095 .xlevel = 0x80000008,
2096 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
2097 },
2098 {
2099 .name = "n270",
2100 .level = 10,
2101 .vendor = CPUID_VENDOR_INTEL,
2102 .family = 6,
2103 .model = 28,
2104 .stepping = 2,
2105 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2106 .features[FEAT_1_EDX] =
2107 PPRO_FEATURES |
2108 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
2109 CPUID_ACPI | CPUID_SS,
2110 /* Some CPUs have no CPUID_SEP */
2111 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
2112 * CPUID_EXT_XTPR */
2113 .features[FEAT_1_ECX] =
2114 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
2115 CPUID_EXT_MOVBE,
2116 .features[FEAT_8000_0001_EDX] =
2117 CPUID_EXT2_NX,
2118 .features[FEAT_8000_0001_ECX] =
2119 CPUID_EXT3_LAHF_LM,
2120 .xlevel = 0x80000008,
2121 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
2122 },
2123 {
2124 .name = "Conroe",
2125 .level = 10,
2126 .vendor = CPUID_VENDOR_INTEL,
2127 .family = 6,
2128 .model = 15,
2129 .stepping = 3,
2130 .features[FEAT_1_EDX] =
2131 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2132 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2133 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2134 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2135 CPUID_DE | CPUID_FP87,
2136 .features[FEAT_1_ECX] =
2137 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2138 .features[FEAT_8000_0001_EDX] =
2139 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2140 .features[FEAT_8000_0001_ECX] =
2141 CPUID_EXT3_LAHF_LM,
2142 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
2143 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE,
2144 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT,
2145 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2146 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2147 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
2148 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2149 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2150 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2151 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2152 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2153 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2154 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2155 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2156 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2157 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2158 .features[FEAT_VMX_SECONDARY_CTLS] =
2159 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES,
2160 .xlevel = 0x80000008,
2161 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
2162 },
2163 {
2164 .name = "Penryn",
2165 .level = 10,
2166 .vendor = CPUID_VENDOR_INTEL,
2167 .family = 6,
2168 .model = 23,
2169 .stepping = 3,
2170 .features[FEAT_1_EDX] =
2171 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2172 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2173 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2174 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2175 CPUID_DE | CPUID_FP87,
2176 .features[FEAT_1_ECX] =
2177 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2178 CPUID_EXT_SSE3,
2179 .features[FEAT_8000_0001_EDX] =
2180 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2181 .features[FEAT_8000_0001_ECX] =
2182 CPUID_EXT3_LAHF_LM,
2183 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS,
2184 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2185 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
2186 .features[FEAT_VMX_EXIT_CTLS] = VMX_VM_EXIT_ACK_INTR_ON_EXIT |
2187 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
2188 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2189 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2190 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS,
2191 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2192 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2193 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2194 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2195 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2196 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2197 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2198 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2199 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2200 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2201 .features[FEAT_VMX_SECONDARY_CTLS] =
2202 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2203 VMX_SECONDARY_EXEC_WBINVD_EXITING,
2204 .xlevel = 0x80000008,
2205 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
2206 },
2207 {
2208 .name = "Nehalem",
2209 .level = 11,
2210 .vendor = CPUID_VENDOR_INTEL,
2211 .family = 6,
2212 .model = 26,
2213 .stepping = 3,
2214 .features[FEAT_1_EDX] =
2215 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2216 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2217 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2218 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2219 CPUID_DE | CPUID_FP87,
2220 .features[FEAT_1_ECX] =
2221 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2222 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
2223 .features[FEAT_8000_0001_EDX] =
2224 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2225 .features[FEAT_8000_0001_ECX] =
2226 CPUID_EXT3_LAHF_LM,
2227 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2228 MSR_VMX_BASIC_TRUE_CTLS,
2229 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2230 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2231 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2232 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2233 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2234 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2235 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2236 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2237 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2238 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2239 .features[FEAT_VMX_EXIT_CTLS] =
2240 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2241 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2242 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2243 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2244 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2245 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT,
2246 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2247 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2248 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2249 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2250 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2251 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2252 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2253 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2254 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2255 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2256 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2257 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2258 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2259 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2260 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2261 .features[FEAT_VMX_SECONDARY_CTLS] =
2262 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2263 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2264 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2265 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2266 VMX_SECONDARY_EXEC_ENABLE_VPID,
2267 .xlevel = 0x80000008,
2268 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
2269 .versions = (X86CPUVersionDefinition[]) {
2270 { .version = 1 },
2271 {
2272 .version = 2,
2273 .alias = "Nehalem-IBRS",
2274 .props = (PropValue[]) {
2275 { "spec-ctrl", "on" },
2276 { "model-id",
2277 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" },
2278 { /* end of list */ }
2279 }
2280 },
2281 { /* end of list */ }
2282 }
2283 },
2284 {
2285 .name = "Westmere",
2286 .level = 11,
2287 .vendor = CPUID_VENDOR_INTEL,
2288 .family = 6,
2289 .model = 44,
2290 .stepping = 1,
2291 .features[FEAT_1_EDX] =
2292 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2293 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2294 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2295 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2296 CPUID_DE | CPUID_FP87,
2297 .features[FEAT_1_ECX] =
2298 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2299 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2300 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2301 .features[FEAT_8000_0001_EDX] =
2302 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
2303 .features[FEAT_8000_0001_ECX] =
2304 CPUID_EXT3_LAHF_LM,
2305 .features[FEAT_6_EAX] =
2306 CPUID_6_EAX_ARAT,
2307 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2308 MSR_VMX_BASIC_TRUE_CTLS,
2309 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2310 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2311 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2312 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2313 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2314 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2315 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2316 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2317 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2318 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2319 .features[FEAT_VMX_EXIT_CTLS] =
2320 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2321 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2322 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2323 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2324 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2325 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2326 MSR_VMX_MISC_STORE_LMA,
2327 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2328 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2329 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2330 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2331 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2332 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2333 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2334 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2335 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2336 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2337 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2338 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2339 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2340 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2341 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2342 .features[FEAT_VMX_SECONDARY_CTLS] =
2343 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2344 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2345 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2346 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2347 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
2348 .xlevel = 0x80000008,
2349 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
2350 .versions = (X86CPUVersionDefinition[]) {
2351 { .version = 1 },
2352 {
2353 .version = 2,
2354 .alias = "Westmere-IBRS",
2355 .props = (PropValue[]) {
2356 { "spec-ctrl", "on" },
2357 { "model-id",
2358 "Westmere E56xx/L56xx/X56xx (IBRS update)" },
2359 { /* end of list */ }
2360 }
2361 },
2362 { /* end of list */ }
2363 }
2364 },
2365 {
2366 .name = "SandyBridge",
2367 .level = 0xd,
2368 .vendor = CPUID_VENDOR_INTEL,
2369 .family = 6,
2370 .model = 42,
2371 .stepping = 1,
2372 .features[FEAT_1_EDX] =
2373 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2374 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2375 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2376 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2377 CPUID_DE | CPUID_FP87,
2378 .features[FEAT_1_ECX] =
2379 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2380 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2381 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2382 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2383 CPUID_EXT_SSE3,
2384 .features[FEAT_8000_0001_EDX] =
2385 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2386 CPUID_EXT2_SYSCALL,
2387 .features[FEAT_8000_0001_ECX] =
2388 CPUID_EXT3_LAHF_LM,
2389 .features[FEAT_XSAVE] =
2390 CPUID_XSAVE_XSAVEOPT,
2391 .features[FEAT_6_EAX] =
2392 CPUID_6_EAX_ARAT,
2393 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2394 MSR_VMX_BASIC_TRUE_CTLS,
2395 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2396 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2397 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2398 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2399 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2400 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2401 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2402 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2403 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2404 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2405 .features[FEAT_VMX_EXIT_CTLS] =
2406 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2407 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2408 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2409 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2410 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2411 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2412 MSR_VMX_MISC_STORE_LMA,
2413 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2414 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2415 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2416 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2417 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2418 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2419 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2420 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2421 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2422 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2423 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2424 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2425 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2426 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2427 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2428 .features[FEAT_VMX_SECONDARY_CTLS] =
2429 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2430 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2431 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2432 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2433 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST,
2434 .xlevel = 0x80000008,
2435 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
2436 .versions = (X86CPUVersionDefinition[]) {
2437 { .version = 1 },
2438 {
2439 .version = 2,
2440 .alias = "SandyBridge-IBRS",
2441 .props = (PropValue[]) {
2442 { "spec-ctrl", "on" },
2443 { "model-id",
2444 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" },
2445 { /* end of list */ }
2446 }
2447 },
2448 { /* end of list */ }
2449 }
2450 },
2451 {
2452 .name = "IvyBridge",
2453 .level = 0xd,
2454 .vendor = CPUID_VENDOR_INTEL,
2455 .family = 6,
2456 .model = 58,
2457 .stepping = 9,
2458 .features[FEAT_1_EDX] =
2459 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2460 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2461 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2462 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2463 CPUID_DE | CPUID_FP87,
2464 .features[FEAT_1_ECX] =
2465 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2466 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2467 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2468 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2469 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2470 .features[FEAT_7_0_EBX] =
2471 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
2472 CPUID_7_0_EBX_ERMS,
2473 .features[FEAT_8000_0001_EDX] =
2474 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2475 CPUID_EXT2_SYSCALL,
2476 .features[FEAT_8000_0001_ECX] =
2477 CPUID_EXT3_LAHF_LM,
2478 .features[FEAT_XSAVE] =
2479 CPUID_XSAVE_XSAVEOPT,
2480 .features[FEAT_6_EAX] =
2481 CPUID_6_EAX_ARAT,
2482 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2483 MSR_VMX_BASIC_TRUE_CTLS,
2484 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2485 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2486 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2487 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2488 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2489 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2490 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2491 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2492 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2493 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS,
2494 .features[FEAT_VMX_EXIT_CTLS] =
2495 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2496 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2497 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2498 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2499 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2500 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2501 MSR_VMX_MISC_STORE_LMA,
2502 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2503 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2504 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2505 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2506 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2507 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2508 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2509 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2510 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2511 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2512 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2513 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2514 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2515 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2516 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2517 .features[FEAT_VMX_SECONDARY_CTLS] =
2518 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2519 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2520 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2521 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2522 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2523 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2524 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2525 VMX_SECONDARY_EXEC_RDRAND_EXITING,
2526 .xlevel = 0x80000008,
2527 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
2528 .versions = (X86CPUVersionDefinition[]) {
2529 { .version = 1 },
2530 {
2531 .version = 2,
2532 .alias = "IvyBridge-IBRS",
2533 .props = (PropValue[]) {
2534 { "spec-ctrl", "on" },
2535 { "model-id",
2536 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" },
2537 { /* end of list */ }
2538 }
2539 },
2540 { /* end of list */ }
2541 }
2542 },
2543 {
2544 .name = "Haswell",
2545 .level = 0xd,
2546 .vendor = CPUID_VENDOR_INTEL,
2547 .family = 6,
2548 .model = 60,
2549 .stepping = 4,
2550 .features[FEAT_1_EDX] =
2551 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2552 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2553 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2554 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2555 CPUID_DE | CPUID_FP87,
2556 .features[FEAT_1_ECX] =
2557 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2558 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2559 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2560 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2561 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2562 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2563 .features[FEAT_8000_0001_EDX] =
2564 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2565 CPUID_EXT2_SYSCALL,
2566 .features[FEAT_8000_0001_ECX] =
2567 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2568 .features[FEAT_7_0_EBX] =
2569 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2570 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2571 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2572 CPUID_7_0_EBX_RTM,
2573 .features[FEAT_XSAVE] =
2574 CPUID_XSAVE_XSAVEOPT,
2575 .features[FEAT_6_EAX] =
2576 CPUID_6_EAX_ARAT,
2577 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2578 MSR_VMX_BASIC_TRUE_CTLS,
2579 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2580 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2581 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2582 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2583 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2584 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2585 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2586 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2587 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2588 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2589 .features[FEAT_VMX_EXIT_CTLS] =
2590 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2591 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2592 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2593 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2594 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2595 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2596 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2597 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2598 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2599 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2600 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2601 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2602 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2603 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2604 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2605 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2606 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2607 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2608 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2609 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2610 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2611 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2612 .features[FEAT_VMX_SECONDARY_CTLS] =
2613 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2614 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2615 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2616 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2617 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2618 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2619 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2620 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2621 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
2622 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2623 .xlevel = 0x80000008,
2624 .model_id = "Intel Core Processor (Haswell)",
2625 .versions = (X86CPUVersionDefinition[]) {
2626 { .version = 1 },
2627 {
2628 .version = 2,
2629 .alias = "Haswell-noTSX",
2630 .props = (PropValue[]) {
2631 { "hle", "off" },
2632 { "rtm", "off" },
2633 { "stepping", "1" },
2634 { "model-id", "Intel Core Processor (Haswell, no TSX)", },
2635 { /* end of list */ }
2636 },
2637 },
2638 {
2639 .version = 3,
2640 .alias = "Haswell-IBRS",
2641 .props = (PropValue[]) {
2642 /* Restore TSX features removed by -v2 above */
2643 { "hle", "on" },
2644 { "rtm", "on" },
2645 /*
2646 * Haswell and Haswell-IBRS had stepping=4 in
2647 * QEMU 4.0 and older
2648 */
2649 { "stepping", "4" },
2650 { "spec-ctrl", "on" },
2651 { "model-id",
2652 "Intel Core Processor (Haswell, IBRS)" },
2653 { /* end of list */ }
2654 }
2655 },
2656 {
2657 .version = 4,
2658 .alias = "Haswell-noTSX-IBRS",
2659 .props = (PropValue[]) {
2660 { "hle", "off" },
2661 { "rtm", "off" },
2662 /* spec-ctrl was already enabled by -v3 above */
2663 { "stepping", "1" },
2664 { "model-id",
2665 "Intel Core Processor (Haswell, no TSX, IBRS)" },
2666 { /* end of list */ }
2667 }
2668 },
2669 { /* end of list */ }
2670 }
2671 },
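/*
 * Note: version props are applied cumulatively, so requesting Haswell-v3
 * first applies the -v2 props and then the -v3 props on top; that is why
 * -v3 above has to switch "hle"/"rtm" back on after -v2 turned them off,
 * and why -v4 can rely on "spec-ctrl" already being enabled by -v3.
 */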
2672 {
2673 .name = "Broadwell",
2674 .level = 0xd,
2675 .vendor = CPUID_VENDOR_INTEL,
2676 .family = 6,
2677 .model = 61,
2678 .stepping = 2,
2679 .features[FEAT_1_EDX] =
2680 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2681 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2682 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2683 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2684 CPUID_DE | CPUID_FP87,
2685 .features[FEAT_1_ECX] =
2686 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2687 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2688 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2689 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2690 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2691 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2692 .features[FEAT_8000_0001_EDX] =
2693 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2694 CPUID_EXT2_SYSCALL,
2695 .features[FEAT_8000_0001_ECX] =
2696 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2697 .features[FEAT_7_0_EBX] =
2698 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2699 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2700 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2701 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2702 CPUID_7_0_EBX_SMAP,
2703 .features[FEAT_XSAVE] =
2704 CPUID_XSAVE_XSAVEOPT,
2705 .features[FEAT_6_EAX] =
2706 CPUID_6_EAX_ARAT,
2707 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2708 MSR_VMX_BASIC_TRUE_CTLS,
2709 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2710 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2711 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2712 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2713 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2714 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2715 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2716 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2717 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2718 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2719 .features[FEAT_VMX_EXIT_CTLS] =
2720 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2721 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2722 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2723 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2724 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2725 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2726 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2727 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2728 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2729 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2730 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2731 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2732 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2733 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2734 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2735 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2736 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2737 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2738 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2739 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2740 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2741 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2742 .features[FEAT_VMX_SECONDARY_CTLS] =
2743 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2744 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2745 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2746 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2747 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2748 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
2749 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2750 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2751 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
2752 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
2753 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2754 .xlevel = 0x80000008,
2755 .model_id = "Intel Core Processor (Broadwell)",
2756 .versions = (X86CPUVersionDefinition[]) {
2757 { .version = 1 },
2758 {
2759 .version = 2,
2760 .alias = "Broadwell-noTSX",
2761 .props = (PropValue[]) {
2762 { "hle", "off" },
2763 { "rtm", "off" },
2764 { "model-id", "Intel Core Processor (Broadwell, no TSX)", },
2765 { /* end of list */ }
2766 },
2767 },
2768 {
2769 .version = 3,
2770 .alias = "Broadwell-IBRS",
2771 .props = (PropValue[]) {
2772 /* Restore TSX features removed by -v2 above */
2773 { "hle", "on" },
2774 { "rtm", "on" },
2775 { "spec-ctrl", "on" },
2776 { "model-id",
2777 "Intel Core Processor (Broadwell, IBRS)" },
2778 { /* end of list */ }
2779 }
2780 },
2781 {
2782 .version = 4,
2783 .alias = "Broadwell-noTSX-IBRS",
2784 .props = (PropValue[]) {
2785 { "hle", "off" },
2786 { "rtm", "off" },
2787 /* spec-ctrl was already enabled by -v3 above */
2788 { "model-id",
2789 "Intel Core Processor (Broadwell, no TSX, IBRS)" },
2790 { /* end of list */ }
2791 }
2792 },
2793 { /* end of list */ }
2794 }
2795 },
2796 {
2797 .name = "Skylake-Client",
2798 .level = 0xd,
2799 .vendor = CPUID_VENDOR_INTEL,
2800 .family = 6,
2801 .model = 94,
2802 .stepping = 3,
2803 .features[FEAT_1_EDX] =
2804 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2805 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2806 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2807 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2808 CPUID_DE | CPUID_FP87,
2809 .features[FEAT_1_ECX] =
2810 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2811 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2812 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2813 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2814 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2815 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2816 .features[FEAT_8000_0001_EDX] =
2817 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2818 CPUID_EXT2_SYSCALL,
2819 .features[FEAT_8000_0001_ECX] =
2820 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2821 .features[FEAT_7_0_EBX] =
2822 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2823 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2824 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2825 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2826 CPUID_7_0_EBX_SMAP,
2827 /* Missing: XSAVES (not supported by some Linux versions,
2828 * including v4.1 to v4.12).
2829 * KVM doesn't yet expose any XSAVES state save component,
2830 * and the only one defined in Skylake (processor tracing)
2831 * probably will block migration anyway.
2832 */
2833 .features[FEAT_XSAVE] =
2834 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2835 CPUID_XSAVE_XGETBV1,
2836 .features[FEAT_6_EAX] =
2837 CPUID_6_EAX_ARAT,
2838 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
2839 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2840 MSR_VMX_BASIC_TRUE_CTLS,
2841 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2842 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2843 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2844 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2845 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2846 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2847 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2848 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2849 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2850 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2851 .features[FEAT_VMX_EXIT_CTLS] =
2852 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2853 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2854 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2855 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2856 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2857 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2858 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2859 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2860 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2861 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
2862 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2863 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2864 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2865 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2866 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2867 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2868 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2869 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2870 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2871 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2872 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2873 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2874 .features[FEAT_VMX_SECONDARY_CTLS] =
2875 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2876 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2877 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2878 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
2879 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
2880 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
2881 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
2882 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
2883 .xlevel = 0x80000008,
2884 .model_id = "Intel Core Processor (Skylake)",
2885 .versions = (X86CPUVersionDefinition[]) {
2886 { .version = 1 },
2887 {
2888 .version = 2,
2889 .alias = "Skylake-Client-IBRS",
2890 .props = (PropValue[]) {
2891 { "spec-ctrl", "on" },
2892 { "model-id",
2893 "Intel Core Processor (Skylake, IBRS)" },
2894 { /* end of list */ }
2895 }
2896 },
2897 {
2898 .version = 3,
2899 .alias = "Skylake-Client-noTSX-IBRS",
2900 .props = (PropValue[]) {
2901 { "hle", "off" },
2902 { "rtm", "off" },
2903 { "model-id",
2904 "Intel Core Processor (Skylake, IBRS, no TSX)" },
2905 { /* end of list */ }
2906 }
2907 },
2908 { /* end of list */ }
2909 }
2910 },
2911 {
2912 .name = "Skylake-Server",
2913 .level = 0xd,
2914 .vendor = CPUID_VENDOR_INTEL,
2915 .family = 6,
2916 .model = 85,
2917 .stepping = 4,
2918 .features[FEAT_1_EDX] =
2919 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2920 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2921 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2922 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2923 CPUID_DE | CPUID_FP87,
2924 .features[FEAT_1_ECX] =
2925 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2926 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2927 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2928 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2929 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2930 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2931 .features[FEAT_8000_0001_EDX] =
2932 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2933 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2934 .features[FEAT_8000_0001_ECX] =
2935 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2936 .features[FEAT_7_0_EBX] =
2937 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2938 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2939 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2940 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2941 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2942 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2943 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2944 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2945 .features[FEAT_7_0_ECX] =
2946 CPUID_7_0_ECX_PKU,
2947 /* Missing: XSAVES (not supported by some Linux versions,
2948 * including v4.1 to v4.12).
2949 * KVM doesn't yet expose any XSAVES state save component,
2950 * and the only one defined in Skylake (processor tracing)
2951 * probably will block migration anyway.
2952 */
2953 .features[FEAT_XSAVE] =
2954 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2955 CPUID_XSAVE_XGETBV1,
2956 .features[FEAT_6_EAX] =
2957 CPUID_6_EAX_ARAT,
2958 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
2959 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
2960 MSR_VMX_BASIC_TRUE_CTLS,
2961 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
2962 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
2963 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
2964 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
2965 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
2966 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
2967 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
2968 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
2969 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
2970 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
2971 .features[FEAT_VMX_EXIT_CTLS] =
2972 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
2973 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
2974 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
2975 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
2976 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
2977 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
2978 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
2979 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
2980 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
2981 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
2982 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
2983 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
2984 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
2985 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
2986 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
2987 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
2988 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
2989 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
2990 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
2991 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
2992 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
2993 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
2994 .features[FEAT_VMX_SECONDARY_CTLS] =
2995 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2996 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
2997 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
2998 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2999 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3000 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3001 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3002 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3003 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3004 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3005 .xlevel = 0x80000008,
3006 .model_id = "Intel Xeon Processor (Skylake)",
3007 .versions = (X86CPUVersionDefinition[]) {
3008 { .version = 1 },
3009 {
3010 .version = 2,
3011 .alias = "Skylake-Server-IBRS",
3012 .props = (PropValue[]) {
3013 /* clflushopt was not added to Skylake-Server-IBRS */
3014 /* TODO: add -v3 including clflushopt */
3015 { "clflushopt", "off" },
3016 { "spec-ctrl", "on" },
3017 { "model-id",
3018 "Intel Xeon Processor (Skylake, IBRS)" },
3019 { /* end of list */ }
3020 }
3021 },
3022 {
3023 .version = 3,
3024 .alias = "Skylake-Server-noTSX-IBRS",
3025 .props = (PropValue[]) {
3026 { "hle", "off" },
3027 { "rtm", "off" },
3028 { "model-id",
3029 "Intel Xeon Processor (Skylake, IBRS, no TSX)" },
3030 { /* end of list */ }
3031 }
3032 },
3033 { /* end of list */ }
3034 }
3035 },
3036 {
3037 .name = "Cascadelake-Server",
3038 .level = 0xd,
3039 .vendor = CPUID_VENDOR_INTEL,
3040 .family = 6,
3041 .model = 85,
3042 .stepping = 6,
3043 .features[FEAT_1_EDX] =
3044 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3045 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3046 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3047 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3048 CPUID_DE | CPUID_FP87,
3049 .features[FEAT_1_ECX] =
3050 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3051 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3052 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3053 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3054 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3055 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3056 .features[FEAT_8000_0001_EDX] =
3057 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3058 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3059 .features[FEAT_8000_0001_ECX] =
3060 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3061 .features[FEAT_7_0_EBX] =
3062 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3063 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3064 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3065 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3066 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3067 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3068 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3069 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3070 .features[FEAT_7_0_ECX] =
3071 CPUID_7_0_ECX_PKU |
3072 CPUID_7_0_ECX_AVX512VNNI,
3073 .features[FEAT_7_0_EDX] =
3074 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3075 /* Missing: XSAVES (not supported by some Linux versions,
3076 * including v4.1 to v4.12).
3077 * KVM doesn't yet expose any XSAVES state save component,
3078 * and the only one defined in Skylake (processor tracing)
3079 * probably will block migration anyway.
3080 */
3081 .features[FEAT_XSAVE] =
3082 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3083 CPUID_XSAVE_XGETBV1,
3084 .features[FEAT_6_EAX] =
3085 CPUID_6_EAX_ARAT,
3086 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3087 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3088 MSR_VMX_BASIC_TRUE_CTLS,
3089 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3090 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3091 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3092 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3093 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3094 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3095 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3096 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3097 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3098 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3099 .features[FEAT_VMX_EXIT_CTLS] =
3100 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3101 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3102 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3103 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3104 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3105 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3106 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3107 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3108 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3109 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3110 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3111 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3112 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3113 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3114 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3115 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3116 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3117 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3118 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3119 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3120 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3121 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3122 .features[FEAT_VMX_SECONDARY_CTLS] =
3123 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3124 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3125 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3126 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3127 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3128 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3129 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3130 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3131 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3132 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3133 .xlevel = 0x80000008,
3134 .model_id = "Intel Xeon Processor (Cascadelake)",
3135 .versions = (X86CPUVersionDefinition[]) {
3136 { .version = 1 },
3137 { .version = 2,
3138 .props = (PropValue[]) {
3139 { "arch-capabilities", "on" },
3140 { "rdctl-no", "on" },
3141 { "ibrs-all", "on" },
3142 { "skip-l1dfl-vmentry", "on" },
3143 { "mds-no", "on" },
3144 { /* end of list */ }
3145 },
3146 },
3147 { .version = 3,
3148 .alias = "Cascadelake-Server-noTSX",
3149 .props = (PropValue[]) {
3150 { "hle", "off" },
3151 { "rtm", "off" },
3152 { /* end of list */ }
3153 },
3154 },
3155 { /* end of list */ }
3156 }
3157 },
3158 {
3159 .name = "Cooperlake",
3160 .level = 0xd,
3161 .vendor = CPUID_VENDOR_INTEL,
3162 .family = 6,
3163 .model = 85,
3164 .stepping = 10,
3165 .features[FEAT_1_EDX] =
3166 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3167 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3168 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3169 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3170 CPUID_DE | CPUID_FP87,
3171 .features[FEAT_1_ECX] =
3172 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3173 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3174 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3175 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3176 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3177 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3178 .features[FEAT_8000_0001_EDX] =
3179 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3180 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3181 .features[FEAT_8000_0001_ECX] =
3182 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3183 .features[FEAT_7_0_EBX] =
3184 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3185 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3186 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3187 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3188 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3189 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3190 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3191 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3192 .features[FEAT_7_0_ECX] =
3193 CPUID_7_0_ECX_PKU |
3194 CPUID_7_0_ECX_AVX512VNNI,
3195 .features[FEAT_7_0_EDX] =
3196 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_STIBP |
3197 CPUID_7_0_EDX_SPEC_CTRL_SSBD | CPUID_7_0_EDX_ARCH_CAPABILITIES,
3198 .features[FEAT_ARCH_CAPABILITIES] =
3199 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL |
3200 MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO |
3201 MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO,
3202 .features[FEAT_7_1_EAX] =
3203 CPUID_7_1_EAX_AVX512_BF16,
3204 /*
3205 * Missing: XSAVES (not supported by some Linux versions,
3206 * including v4.1 to v4.12).
3207 * KVM doesn't yet expose any XSAVES state save component,
3208 * and the only one defined in Skylake (processor tracing)
3209 * probably will block migration anyway.
3210 */
3211 .features[FEAT_XSAVE] =
3212 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3213 CPUID_XSAVE_XGETBV1,
3214 .features[FEAT_6_EAX] =
3215 CPUID_6_EAX_ARAT,
3216 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3217 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3218 MSR_VMX_BASIC_TRUE_CTLS,
3219 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3220 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3221 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3222 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3223 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3224 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3225 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3226 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3227 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3228 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3229 .features[FEAT_VMX_EXIT_CTLS] =
3230 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3231 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3232 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3233 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3234 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3235 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3236 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3237 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3238 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3239 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3240 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3241 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3242 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3243 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3244 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3245 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3246 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3247 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3248 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3249 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3250 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3251 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3252 .features[FEAT_VMX_SECONDARY_CTLS] =
3253 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3254 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3255 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3256 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3257 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3258 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3259 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3260 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3261 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3262 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3263 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3264 .xlevel = 0x80000008,
3265 .model_id = "Intel Xeon Processor (Cooperlake)",
3266 },
3267 {
3268 .name = "Icelake-Client",
3269 .level = 0xd,
3270 .vendor = CPUID_VENDOR_INTEL,
3271 .family = 6,
3272 .model = 126,
3273 .stepping = 0,
3274 .features[FEAT_1_EDX] =
3275 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3276 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3277 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3278 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3279 CPUID_DE | CPUID_FP87,
3280 .features[FEAT_1_ECX] =
3281 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3282 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3283 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3284 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3285 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3286 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3287 .features[FEAT_8000_0001_EDX] =
3288 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
3289 CPUID_EXT2_SYSCALL,
3290 .features[FEAT_8000_0001_ECX] =
3291 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3292 .features[FEAT_8000_0008_EBX] =
3293 CPUID_8000_0008_EBX_WBNOINVD,
3294 .features[FEAT_7_0_EBX] =
3295 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3296 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3297 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3298 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3299 CPUID_7_0_EBX_SMAP,
3300 .features[FEAT_7_0_ECX] =
3301 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
3302 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
3303 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
3304 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
3305 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
3306 .features[FEAT_7_0_EDX] =
3307 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3308 /* Missing: XSAVES (not supported by some Linux versions,
3309 * including v4.1 to v4.12).
3310 * KVM doesn't yet expose any XSAVES state save component,
3311 * and the only one defined in Skylake (processor tracing)
3312 * probably will block migration anyway.
3313 */
3314 .features[FEAT_XSAVE] =
3315 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3316 CPUID_XSAVE_XGETBV1,
3317 .features[FEAT_6_EAX] =
3318 CPUID_6_EAX_ARAT,
3319 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3320 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3321 MSR_VMX_BASIC_TRUE_CTLS,
3322 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3323 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3324 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3325 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3326 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3327 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3328 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3329 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3330 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3331 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3332 .features[FEAT_VMX_EXIT_CTLS] =
3333 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3334 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3335 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3336 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3337 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3338 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3339 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3340 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3341 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3342 VMX_PIN_BASED_VMX_PREEMPTION_TIMER,
3343 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3344 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3345 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3346 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3347 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3348 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3349 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3350 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3351 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3352 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3353 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3354 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3355 .features[FEAT_VMX_SECONDARY_CTLS] =
3356 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3357 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3358 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3359 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3360 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3361 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3362 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3363 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3364 .xlevel = 0x80000008,
3365 .model_id = "Intel Core Processor (Icelake)",
3366 .versions = (X86CPUVersionDefinition[]) {
3367 { .version = 1 },
3368 {
3369 .version = 2,
3370 .alias = "Icelake-Client-noTSX",
3371 .props = (PropValue[]) {
3372 { "hle", "off" },
3373 { "rtm", "off" },
3374 { /* end of list */ }
3375 },
3376 },
3377 { /* end of list */ }
3378 }
3379 },
3380 {
3381 .name = "Icelake-Server",
3382 .level = 0xd,
3383 .vendor = CPUID_VENDOR_INTEL,
3384 .family = 6,
3385 .model = 134,
3386 .stepping = 0,
3387 .features[FEAT_1_EDX] =
3388 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3389 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3390 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3391 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3392 CPUID_DE | CPUID_FP87,
3393 .features[FEAT_1_ECX] =
3394 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3395 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3396 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3397 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3398 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3399 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3400 .features[FEAT_8000_0001_EDX] =
3401 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3402 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3403 .features[FEAT_8000_0001_ECX] =
3404 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3405 .features[FEAT_8000_0008_EBX] =
3406 CPUID_8000_0008_EBX_WBNOINVD,
3407 .features[FEAT_7_0_EBX] =
3408 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
3409 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
3410 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
3411 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
3412 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
3413 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
3414 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
3415 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
3416 .features[FEAT_7_0_ECX] =
3417 CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
3418 CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI |
3419 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
3420 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
3421 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
3422 .features[FEAT_7_0_EDX] =
3423 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3424 /* Missing: XSAVES (not supported by some Linux versions,
3425 * including v4.1 to v4.12).
3426 * KVM doesn't yet expose any XSAVES state save component,
3427 * and the only one defined in Skylake (processor tracing)
3428 * probably will block migration anyway.
3429 */
3430 .features[FEAT_XSAVE] =
3431 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3432 CPUID_XSAVE_XGETBV1,
3433 .features[FEAT_6_EAX] =
3434 CPUID_6_EAX_ARAT,
3435 /* Missing: Mode-based execute control (XS/XU), processor tracing, TSC scaling */
3436 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3437 MSR_VMX_BASIC_TRUE_CTLS,
3438 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3439 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3440 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3441 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3442 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3443 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3444 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3445 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3446 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3447 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3448 .features[FEAT_VMX_EXIT_CTLS] =
3449 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3450 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3451 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3452 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3453 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3454 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3455 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3456 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3457 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3458 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3459 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3460 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3461 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3462 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3463 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3464 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3465 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3466 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3467 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3468 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3469 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3470 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3471 .features[FEAT_VMX_SECONDARY_CTLS] =
3472 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3473 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3474 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3475 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3476 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3477 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3478 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3479 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3480 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS,
3481 .xlevel = 0x80000008,
3482 .model_id = "Intel Xeon Processor (Icelake)",
3483 .versions = (X86CPUVersionDefinition[]) {
3484 { .version = 1 },
3485 {
3486 .version = 2,
3487 .alias = "Icelake-Server-noTSX",
3488 .props = (PropValue[]) {
3489 { "hle", "off" },
3490 { "rtm", "off" },
3491 { /* end of list */ }
3492 },
3493 },
3494 {
3495 .version = 3,
3496 .props = (PropValue[]) {
3497 { "arch-capabilities", "on" },
3498 { "rdctl-no", "on" },
3499 { "ibrs-all", "on" },
3500 { "skip-l1dfl-vmentry", "on" },
3501 { "mds-no", "on" },
3502 { "pschange-mc-no", "on" },
3503 { "taa-no", "on" },
3504 { /* end of list */ }
3505 },
3506 },
3507 { /* end of list */ }
3508 }
3509 },
3510 {
3511 .name = "Denverton",
3512 .level = 21,
3513 .vendor = CPUID_VENDOR_INTEL,
3514 .family = 6,
3515 .model = 95,
3516 .stepping = 1,
3517 .features[FEAT_1_EDX] =
3518 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC |
3519 CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC |
3520 CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
3521 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR |
3522 CPUID_SSE | CPUID_SSE2,
3523 .features[FEAT_1_ECX] =
3524 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
3525 CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | CPUID_EXT_SSE41 |
3526 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
3527 CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER |
3528 CPUID_EXT_AES | CPUID_EXT_XSAVE | CPUID_EXT_RDRAND,
3529 .features[FEAT_8000_0001_EDX] =
3530 CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB |
3531 CPUID_EXT2_RDTSCP | CPUID_EXT2_LM,
3532 .features[FEAT_8000_0001_ECX] =
3533 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3534 .features[FEAT_7_0_EBX] =
3535 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_ERMS |
3536 CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_SMAP |
3537 CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_SHA_NI,
3538 .features[FEAT_7_0_EDX] =
3539 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES |
3540 CPUID_7_0_EDX_SPEC_CTRL_SSBD,
3541 /*
3542 * Missing: XSAVES (not supported by some Linux versions,
3543 * including v4.1 to v4.12).
3544 * KVM doesn't yet expose any XSAVES state save component,
3545 * and the only one defined in Skylake (processor tracing)
3546 * probably will block migration anyway.
3547 */
3548 .features[FEAT_XSAVE] =
3549 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | CPUID_XSAVE_XGETBV1,
3550 .features[FEAT_6_EAX] =
3551 CPUID_6_EAX_ARAT,
3552 .features[FEAT_ARCH_CAPABILITIES] =
3553 MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY,
3554 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3555 MSR_VMX_BASIC_TRUE_CTLS,
3556 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3557 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3558 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3559 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3560 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3561 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3562 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3563 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3564 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3565 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3566 .features[FEAT_VMX_EXIT_CTLS] =
3567 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3568 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3569 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3570 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3571 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3572 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3573 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3574 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3575 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3576 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3577 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3578 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3579 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3580 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3581 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3582 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3583 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3584 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3585 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3586 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3587 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3588 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3589 .features[FEAT_VMX_SECONDARY_CTLS] =
3590 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3591 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3592 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3593 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3594 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3595 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3596 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3597 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3598 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3599 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3600 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3601 .xlevel = 0x80000008,
3602 .model_id = "Intel Atom Processor (Denverton)",
3603 .versions = (X86CPUVersionDefinition[]) {
3604 { .version = 1 },
3605 {
3606 .version = 2,
3607 .props = (PropValue[]) {
3608 { "monitor", "off" },
3609 { "mpx", "off" },
3610 { /* end of list */ },
3611 },
3612 },
3613 { /* end of list */ },
3614 },
3615 },
3616 {
3617 .name = "Snowridge",
3618 .level = 27,
3619 .vendor = CPUID_VENDOR_INTEL,
3620 .family = 6,
3621 .model = 134,
3622 .stepping = 1,
3623 .features[FEAT_1_EDX] =
3624 /* missing: CPUID_PN CPUID_IA64 */
3625 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
3626 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE |
3627 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE |
3628 CPUID_CX8 | CPUID_APIC | CPUID_SEP |
3629 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
3630 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH |
3631 CPUID_MMX |
3632 CPUID_FXSR | CPUID_SSE | CPUID_SSE2,
3633 .features[FEAT_1_ECX] =
3634 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
3635 CPUID_EXT_SSSE3 |
3636 CPUID_EXT_CX16 |
3637 CPUID_EXT_SSE41 |
3638 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
3639 CPUID_EXT_POPCNT |
3640 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE |
3641 CPUID_EXT_RDRAND,
3642 .features[FEAT_8000_0001_EDX] =
3643 CPUID_EXT2_SYSCALL |
3644 CPUID_EXT2_NX |
3645 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3646 CPUID_EXT2_LM,
3647 .features[FEAT_8000_0001_ECX] =
3648 CPUID_EXT3_LAHF_LM |
3649 CPUID_EXT3_3DNOWPREFETCH,
3650 .features[FEAT_7_0_EBX] =
3651 CPUID_7_0_EBX_FSGSBASE |
3652 CPUID_7_0_EBX_SMEP |
3653 CPUID_7_0_EBX_ERMS |
3654 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */
3655 CPUID_7_0_EBX_RDSEED |
3656 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3657 CPUID_7_0_EBX_CLWB |
3658 CPUID_7_0_EBX_SHA_NI,
3659 .features[FEAT_7_0_ECX] =
3660 CPUID_7_0_ECX_UMIP |
3661 /* missing bit 5 */
3662 CPUID_7_0_ECX_GFNI |
3663 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE |
3664 CPUID_7_0_ECX_MOVDIR64B,
3665 .features[FEAT_7_0_EDX] =
3666 CPUID_7_0_EDX_SPEC_CTRL |
3667 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD |
3668 CPUID_7_0_EDX_CORE_CAPABILITY,
3669 .features[FEAT_CORE_CAPABILITY] =
3670 MSR_CORE_CAP_SPLIT_LOCK_DETECT,
3671 /*
3672 * Missing: XSAVES (not supported by some Linux versions,
3673 * including v4.1 to v4.12).
3674 * KVM doesn't yet expose any XSAVES state save component,
3675 * and the only one defined in Skylake (processor tracing)
3676 * probably will block migration anyway.
3677 */
3678 .features[FEAT_XSAVE] =
3679 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3680 CPUID_XSAVE_XGETBV1,
3681 .features[FEAT_6_EAX] =
3682 CPUID_6_EAX_ARAT,
3683 .features[FEAT_VMX_BASIC] = MSR_VMX_BASIC_INS_OUTS |
3684 MSR_VMX_BASIC_TRUE_CTLS,
3685 .features[FEAT_VMX_ENTRY_CTLS] = VMX_VM_ENTRY_IA32E_MODE |
3686 VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VMX_VM_ENTRY_LOAD_IA32_PAT |
3687 VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_LOAD_IA32_EFER,
3688 .features[FEAT_VMX_EPT_VPID_CAPS] = MSR_VMX_EPT_EXECONLY |
3689 MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB |
3690 MSR_VMX_EPT_1GB | MSR_VMX_EPT_INVEPT |
3691 MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT |
3692 MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR |
3693 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | MSR_VMX_EPT_INVVPID_ALL_CONTEXT |
3694 MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS | MSR_VMX_EPT_AD_BITS,
3695 .features[FEAT_VMX_EXIT_CTLS] =
3696 VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_DEBUG_CONTROLS |
3697 VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
3698 VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_LOAD_IA32_EFER |
3699 VMX_VM_EXIT_SAVE_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER |
3700 VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER,
3701 .features[FEAT_VMX_MISC] = MSR_VMX_MISC_ACTIVITY_HLT |
3702 MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_VMWRITE_VMEXIT,
3703 .features[FEAT_VMX_PINBASED_CTLS] = VMX_PIN_BASED_EXT_INTR_MASK |
3704 VMX_PIN_BASED_NMI_EXITING | VMX_PIN_BASED_VIRTUAL_NMIS |
3705 VMX_PIN_BASED_VMX_PREEMPTION_TIMER | VMX_PIN_BASED_POSTED_INTR,
3706 .features[FEAT_VMX_PROCBASED_CTLS] = VMX_CPU_BASED_VIRTUAL_INTR_PENDING |
3707 VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING |
3708 VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING |
3709 VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING |
3710 VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING |
3711 VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_MOV_DR_EXITING |
3712 VMX_CPU_BASED_UNCOND_IO_EXITING | VMX_CPU_BASED_USE_IO_BITMAPS |
3713 VMX_CPU_BASED_MONITOR_EXITING | VMX_CPU_BASED_PAUSE_EXITING |
3714 VMX_CPU_BASED_VIRTUAL_NMI_PENDING | VMX_CPU_BASED_USE_MSR_BITMAPS |
3715 VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING |
3716 VMX_CPU_BASED_MONITOR_TRAP_FLAG |
3717 VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS,
3718 .features[FEAT_VMX_SECONDARY_CTLS] =
3719 VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
3720 VMX_SECONDARY_EXEC_WBINVD_EXITING | VMX_SECONDARY_EXEC_ENABLE_EPT |
3721 VMX_SECONDARY_EXEC_DESC | VMX_SECONDARY_EXEC_RDTSCP |
3722 VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
3723 VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST |
3724 VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT |
3725 VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
3726 VMX_SECONDARY_EXEC_RDRAND_EXITING | VMX_SECONDARY_EXEC_ENABLE_INVPCID |
3727 VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS |
3728 VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML,
3729 .features[FEAT_VMX_VMFUNC] = MSR_VMX_VMFUNC_EPT_SWITCHING,
3730 .xlevel = 0x80000008,
3731 .model_id = "Intel Atom Processor (SnowRidge)",
3732 .versions = (X86CPUVersionDefinition[]) {
3733 { .version = 1 },
3734 {
3735 .version = 2,
3736 .props = (PropValue[]) {
3737 { "mpx", "off" },
3738 { "model-id", "Intel Atom Processor (Snowridge, no MPX)" },
3739 { /* end of list */ },
3740 },
3741 },
3742 { /* end of list */ },
3743 },
3744 },
3745 {
3746 .name = "KnightsMill",
3747 .level = 0xd,
3748 .vendor = CPUID_VENDOR_INTEL,
3749 .family = 6,
3750 .model = 133,
3751 .stepping = 0,
3752 .features[FEAT_1_EDX] =
3753 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
3754 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
3755 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
3756 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
3757 CPUID_PSE | CPUID_DE | CPUID_FP87,
3758 .features[FEAT_1_ECX] =
3759 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3760 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
3761 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
3762 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
3763 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
3764 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
3765 .features[FEAT_8000_0001_EDX] =
3766 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
3767 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3768 .features[FEAT_8000_0001_ECX] =
3769 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
3770 .features[FEAT_7_0_EBX] =
3771 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3772 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
3773 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
3774 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
3775 CPUID_7_0_EBX_AVX512ER,
3776 .features[FEAT_7_0_ECX] =
3777 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
3778 .features[FEAT_7_0_EDX] =
3779 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
3780 .features[FEAT_XSAVE] =
3781 CPUID_XSAVE_XSAVEOPT,
3782 .features[FEAT_6_EAX] =
3783 CPUID_6_EAX_ARAT,
3784 .xlevel = 0x80000008,
3785 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
3786 },
3787 {
3788 .name = "Opteron_G1",
3789 .level = 5,
3790 .vendor = CPUID_VENDOR_AMD,
3791 .family = 15,
3792 .model = 6,
3793 .stepping = 1,
3794 .features[FEAT_1_EDX] =
3795 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3796 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3797 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3798 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3799 CPUID_DE | CPUID_FP87,
3800 .features[FEAT_1_ECX] =
3801 CPUID_EXT_SSE3,
3802 .features[FEAT_8000_0001_EDX] =
3803 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3804 .xlevel = 0x80000008,
3805 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
3806 },
3807 {
3808 .name = "Opteron_G2",
3809 .level = 5,
3810 .vendor = CPUID_VENDOR_AMD,
3811 .family = 15,
3812 .model = 6,
3813 .stepping = 1,
3814 .features[FEAT_1_EDX] =
3815 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3816 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3817 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3818 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3819 CPUID_DE | CPUID_FP87,
3820 .features[FEAT_1_ECX] =
3821 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
3822 .features[FEAT_8000_0001_EDX] =
3823 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
3824 .features[FEAT_8000_0001_ECX] =
3825 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
3826 .xlevel = 0x80000008,
3827 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
3828 },
3829 {
3830 .name = "Opteron_G3",
3831 .level = 5,
3832 .vendor = CPUID_VENDOR_AMD,
3833 .family = 16,
3834 .model = 2,
3835 .stepping = 3,
3836 .features[FEAT_1_EDX] =
3837 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3838 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3839 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3840 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3841 CPUID_DE | CPUID_FP87,
3842 .features[FEAT_1_ECX] =
3843 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
3844 CPUID_EXT_SSE3,
3845 .features[FEAT_8000_0001_EDX] =
3846 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
3847 CPUID_EXT2_RDTSCP,
3848 .features[FEAT_8000_0001_ECX] =
3849 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
3850 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
3851 .xlevel = 0x80000008,
3852 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
3853 },
3854 {
3855 .name = "Opteron_G4",
3856 .level = 0xd,
3857 .vendor = CPUID_VENDOR_AMD,
3858 .family = 21,
3859 .model = 1,
3860 .stepping = 2,
3861 .features[FEAT_1_EDX] =
3862 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3863 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3864 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3865 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3866 CPUID_DE | CPUID_FP87,
3867 .features[FEAT_1_ECX] =
3868 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
3869 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3870 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
3871 CPUID_EXT_SSE3,
3872 .features[FEAT_8000_0001_EDX] =
3873 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
3874 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
3875 .features[FEAT_8000_0001_ECX] =
3876 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
3877 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
3878 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
3879 CPUID_EXT3_LAHF_LM,
3880 .features[FEAT_SVM] =
3881 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3882 /* no xsaveopt! */
3883 .xlevel = 0x8000001A,
3884 .model_id = "AMD Opteron 62xx class CPU",
3885 },
3886 {
3887 .name = "Opteron_G5",
3888 .level = 0xd,
3889 .vendor = CPUID_VENDOR_AMD,
3890 .family = 21,
3891 .model = 2,
3892 .stepping = 0,
3893 .features[FEAT_1_EDX] =
3894 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
3895 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
3896 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
3897 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
3898 CPUID_DE | CPUID_FP87,
3899 .features[FEAT_1_ECX] =
3900 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
3901 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
3902 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
3903 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
3904 .features[FEAT_8000_0001_EDX] =
3905 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
3906 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
3907 .features[FEAT_8000_0001_ECX] =
3908 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
3909 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
3910 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
3911 CPUID_EXT3_LAHF_LM,
3912 .features[FEAT_SVM] =
3913 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3914 /* no xsaveopt! */
3915 .xlevel = 0x8000001A,
3916 .model_id = "AMD Opteron 63xx class CPU",
3917 },
3918 {
3919 .name = "EPYC",
3920 .level = 0xd,
3921 .vendor = CPUID_VENDOR_AMD,
3922 .family = 23,
3923 .model = 1,
3924 .stepping = 2,
3925 .features[FEAT_1_EDX] =
3926 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3927 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
3928 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
3929 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
3930 CPUID_VME | CPUID_FP87,
3931 .features[FEAT_1_ECX] =
3932 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
3933 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
3934 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3935 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3936 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
3937 .features[FEAT_8000_0001_EDX] =
3938 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3939 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3940 CPUID_EXT2_SYSCALL,
3941 .features[FEAT_8000_0001_ECX] =
3942 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3943 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3944 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3945 CPUID_EXT3_TOPOEXT,
3946 .features[FEAT_7_0_EBX] =
3947 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3948 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3949 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3950 CPUID_7_0_EBX_SHA_NI,
3951 .features[FEAT_XSAVE] =
3952 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3953 CPUID_XSAVE_XGETBV1,
3954 .features[FEAT_6_EAX] =
3955 CPUID_6_EAX_ARAT,
3956 .features[FEAT_SVM] =
3957 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3958 .xlevel = 0x8000001E,
3959 .model_id = "AMD EPYC Processor",
3960 .cache_info = &epyc_cache_info,
3961 .use_epyc_apic_id_encoding = 1,
3962 .versions = (X86CPUVersionDefinition[]) {
3963 { .version = 1 },
3964 {
3965 .version = 2,
3966 .alias = "EPYC-IBPB",
3967 .props = (PropValue[]) {
3968 { "ibpb", "on" },
3969 { "model-id",
3970 "AMD EPYC Processor (with IBPB)" },
3971 { /* end of list */ }
3972 }
3973 },
3974 {
3975 .version = 3,
3976 .props = (PropValue[]) {
3977 { "ibpb", "on" },
3978 { "perfctr-core", "on" },
3979 { "clzero", "on" },
3980 { "xsaveerptr", "on" },
3981 { "xsaves", "on" },
3982 { "model-id",
3983 "AMD EPYC Processor" },
3984 { /* end of list */ }
3985 }
3986 },
3987 { /* end of list */ }
3988 }
3989 },
3990 {
3991 .name = "Dhyana",
3992 .level = 0xd,
3993 .vendor = CPUID_VENDOR_HYGON,
3994 .family = 24,
3995 .model = 0,
3996 .stepping = 1,
3997 .features[FEAT_1_EDX] =
3998 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3999 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
4000 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
4001 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
4002 CPUID_VME | CPUID_FP87,
4003 .features[FEAT_1_ECX] =
4004 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
4005 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
4006 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
4007 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
4008 CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
4009 .features[FEAT_8000_0001_EDX] =
4010 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
4011 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
4012 CPUID_EXT2_SYSCALL,
4013 .features[FEAT_8000_0001_ECX] =
4014 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
4015 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
4016 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
4017 CPUID_EXT3_TOPOEXT,
4018 .features[FEAT_8000_0008_EBX] =
4019 CPUID_8000_0008_EBX_IBPB,
4020 .features[FEAT_7_0_EBX] =
4021 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
4022 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
4023 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
4024 /*
4025 * Missing: XSAVES (not supported by some Linux versions,
4026 * including v4.1 to v4.12).
4027 * KVM doesn't yet expose any XSAVES state save component.
4028 */
4029 .features[FEAT_XSAVE] =
4030 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
4031 CPUID_XSAVE_XGETBV1,
4032 .features[FEAT_6_EAX] =
4033 CPUID_6_EAX_ARAT,
4034 .features[FEAT_SVM] =
4035 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
4036 .xlevel = 0x8000001E,
4037 .model_id = "Hygon Dhyana Processor",
4038 .cache_info = &epyc_cache_info,
4039 },
4040 {
4041 .name = "EPYC-Rome",
4042 .level = 0xd,
4043 .vendor = CPUID_VENDOR_AMD,
4044 .family = 23,
4045 .model = 49,
4046 .stepping = 0,
4047 .features[FEAT_1_EDX] =
4048 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
4049 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
4050 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
4051 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
4052 CPUID_VME | CPUID_FP87,
4053 .features[FEAT_1_ECX] =
4054 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
4055 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
4056 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
4057 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
4058 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
4059 .features[FEAT_8000_0001_EDX] =
4060 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
4061 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
4062 CPUID_EXT2_SYSCALL,
4063 .features[FEAT_8000_0001_ECX] =
4064 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
4065 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
4066 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
4067 CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE,
4068 .features[FEAT_8000_0008_EBX] =
4069 CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR |
4070 CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB |
4071 CPUID_8000_0008_EBX_STIBP,
4072 .features[FEAT_7_0_EBX] =
4073 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
4074 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
4075 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
4076 CPUID_7_0_EBX_SHA_NI | CPUID_7_0_EBX_CLWB,
4077 .features[FEAT_7_0_ECX] =
4078 CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_RDPID,
4079 .features[FEAT_XSAVE] =
4080 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
4081 CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES,
4082 .features[FEAT_6_EAX] =
4083 CPUID_6_EAX_ARAT,
4084 .features[FEAT_SVM] =
4085 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
4086 .xlevel = 0x8000001E,
4087 .model_id = "AMD EPYC-Rome Processor",
4088 .cache_info = &epyc_rome_cache_info,
4089 .use_epyc_apic_id_encoding = 1,
4090 },
4091 };
4092
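/*
 * Example usage of the table above (illustrative, not exhaustive): a
 * definition is selected with the -cpu option, e.g. "-cpu EPYC-Rome" or
 * "-cpu Skylake-Server,spec-ctrl=on". Models that carry a .versions array
 * are also exposed under versioned names such as "Cascadelake-Server-v3"
 * and under any .alias listed there (e.g. "Cascadelake-Server-noTSX");
 * "-cpu help" prints the full list.
 */
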
4093 /* KVM-specific features that are automatically added/removed
4094 * from all CPU models when KVM is enabled.
4095 */
4096 static PropValue kvm_default_props[] = {
4097 { "kvmclock", "on" },
4098 { "kvm-nopiodelay", "on" },
4099 { "kvm-asyncpf", "on" },
4100 { "kvm-steal-time", "on" },
4101 { "kvm-pv-eoi", "on" },
4102 { "kvmclock-stable-bit", "on" },
4103 { "x2apic", "on" },
4104 { "acpi", "off" },
4105 { "monitor", "off" },
4106 { "svm", "off" },
4107 { NULL, NULL },
4108 };
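
/*
 * Effect of the defaults above, as a sketch: with KVM enabled a model such
 * as "-cpu EPYC" starts with kvmclock, kvm-asyncpf, x2apic, etc. enabled
 * and with acpi, monitor and svm disabled, unless a property is set
 * explicitly on the command line (e.g. "-cpu EPYC,x2apic=off").
 */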
4109
4110 /* TCG-specific defaults that override all CPU models when using TCG */
4112 static PropValue tcg_default_props[] = {
4113 { "vme", "off" },
4114 { NULL, NULL },
4115 };
4116
4117
4118 /*
4119 * We resolve CPU model aliases using -v1 when using "-machine
4120 * none", but this is just for compatibility while libvirt isn't
4121 * adapted to resolve CPU model versions before creating VMs.
4122 * See "Runnability guarantee of CPU models" at * qemu-deprecated.texi.
4123 */
4124 X86CPUVersion default_cpu_version = 1;
4125
4126 void x86_cpu_set_default_version(X86CPUVersion version)
4127 {
4128 /* Translating CPU_VERSION_AUTO to CPU_VERSION_AUTO doesn't make sense */
4129 assert(version != CPU_VERSION_AUTO);
4130 default_cpu_version = version;
4131 }
4132
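/*
 * For example, the Cascadelake-Server definition above lists versions
 * 1, 2 and 3, so the loop below ends with v == 3 and that model's
 * CPU_VERSION_LATEST is 3.
 */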
4133 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model)
4134 {
4135 int v = 0;
4136 const X86CPUVersionDefinition *vdef =
4137 x86_cpu_def_get_versions(model->cpudef);
4138 while (vdef->version) {
4139 v = vdef->version;
4140 vdef++;
4141 }
4142 return v;
4143 }
4144
4145 /* Return the actual version being used for a specific CPU model */
4146 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model)
4147 {
4148 X86CPUVersion v = model->version;
4149 if (v == CPU_VERSION_AUTO) {
4150 v = default_cpu_version;
4151 }
4152 if (v == CPU_VERSION_LATEST) {
4153 return x86_cpu_model_last_version(model);
4154 }
4155 return v;
4156 }
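
/*
 * Example resolution: "-cpu Cascadelake-Server" uses CPU_VERSION_AUTO,
 * which with the default_cpu_version of 1 above resolves to version 1;
 * if x86_cpu_set_default_version(CPU_VERSION_LATEST) had been called,
 * the same model would resolve to version 3 via
 * x86_cpu_model_last_version().
 */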
4157
4158 void x86_cpu_change_kvm_default(const char *prop, const char *value)
4159 {
4160 PropValue *pv;
4161 for (pv = kvm_default_props; pv->prop; pv++) {
4162 if (!strcmp(pv->prop, prop)) {
4163 pv->value = value;
4164 break;
4165 }
4166 }
4167
4168 /* It is valid to call this function only for properties that
4169 * are already present in the kvm_default_props table.
4170 */
4171 assert(pv->prop);
4172 }
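
/*
 * Hypothetical caller sketch (not taken from this file): compatibility
 * code could weaken one of the defaults above with, for example,
 *
 *     x86_cpu_change_kvm_default("kvm-pv-eoi", "off");
 *
 * The property must already be listed in kvm_default_props[], otherwise
 * the assert() above fires.
 */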
4173
4174 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
4175 bool migratable_only);
4176
4177 static bool lmce_supported(void)
4178 {
4179 uint64_t mce_cap = 0;
4180
4181 #ifdef CONFIG_KVM
4182 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
4183 return false;
4184 }
4185 #endif
4186
4187 return !!(mce_cap & MCG_LMCE_P);
4188 }
4189
4190 #define CPUID_MODEL_ID_SZ 48
4191
4192 /**
4193 * cpu_x86_fill_model_id:
4194 * Get CPUID model ID string from host CPU.
4195 *
4196 * @str should have at least CPUID_MODEL_ID_SZ bytes
4197 *
4198 * The function does NOT add a null terminator to the string
4199 * automatically.
4200 */
4201 static int cpu_x86_fill_model_id(char *str)
4202 {
4203 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
4204 int i;
4205
4206 for (i = 0; i < 3; i++) {
4207 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
4208 memcpy(str + i * 16 + 0, &eax, 4);
4209 memcpy(str + i * 16 + 4, &ebx, 4);
4210 memcpy(str + i * 16 + 8, &ecx, 4);
4211 memcpy(str + i * 16 + 12, &edx, 4);
4212 }
4213 return 0;
4214 }
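
/*
 * The three host_cpuid() calls above read CPUID leaves 0x80000002 to
 * 0x80000004; each returns 16 bytes of the processor brand string in
 * EAX/EBX/ECX/EDX, filling the 48-byte buffer (for example a host string
 * such as "Intel(R) Xeon(R) Platinum 8260 CPU @ 2.40GHz").
 */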
4215
4216 static Property max_x86_cpu_properties[] = {
4217 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
4218 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
4219 DEFINE_PROP_END_OF_LIST()
4220 };
4221
4222 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
4223 {
4224 DeviceClass *dc = DEVICE_CLASS(oc);
4225 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4226
4227 xcc->ordering = 9;
4228
4229 xcc->model_description =
4230 "Enables all features supported by the accelerator in the current host";
4231
4232 device_class_set_props(dc, max_x86_cpu_properties);
4233 }
4234
4235 static void max_x86_cpu_initfn(Object *obj)
4236 {
4237 X86CPU *cpu = X86_CPU(obj);
4238 CPUX86State *env = &cpu->env;
4239 KVMState *s = kvm_state;
4240
4241 /* We can't fill the features array here because we don't know yet if
4242 * "migratable" is true or false.
4243 */
4244 cpu->max_features = true;
4245
4246 if (accel_uses_host_cpuid()) {
4247 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
4248 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
4249 int family, model, stepping;
4250
4251 host_vendor_fms(vendor, &family, &model, &stepping);
4252 cpu_x86_fill_model_id(model_id);
4253
4254 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
4255 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
4256 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
4257 object_property_set_int(OBJECT(cpu), stepping, "stepping",
4258 &error_abort);
4259 object_property_set_str(OBJECT(cpu), model_id, "model-id",
4260 &error_abort);
4261
4262 if (kvm_enabled()) {
4263 env->cpuid_min_level =
4264 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
4265 env->cpuid_min_xlevel =
4266 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
4267 env->cpuid_min_xlevel2 =
4268 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
4269 } else {
4270 env->cpuid_min_level =
4271 hvf_get_supported_cpuid(0x0, 0, R_EAX);
4272 env->cpuid_min_xlevel =
4273 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
4274 env->cpuid_min_xlevel2 =
4275 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
4276 }
4277
4278 if (lmce_supported()) {
4279 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
4280 }
4281 } else {
4282 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
4283 "vendor", &error_abort);
4284 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
4285 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
4286 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
4287 object_property_set_str(OBJECT(cpu),
4288 "QEMU TCG CPU version " QEMU_HW_VERSION,
4289 "model-id", &error_abort);
4290 }
4291
4292 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
4293 }
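
/*
 * In short, "-cpu max" mirrors the host CPUID when a host-CPUID-based
 * accelerator (KVM or HVF) is in use and falls back to the fixed QEMU TCG
 * identification above otherwise; "-cpu host", defined below, derives
 * from this class.
 */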
4294
4295 static const TypeInfo max_x86_cpu_type_info = {
4296 .name = X86_CPU_TYPE_NAME("max"),
4297 .parent = TYPE_X86_CPU,
4298 .instance_init = max_x86_cpu_initfn,
4299 .class_init = max_x86_cpu_class_init,
4300 };
4301
4302 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
4303 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
4304 {
4305 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4306
4307 xcc->host_cpuid_required = true;
4308 xcc->ordering = 8;
4309
4310 #if defined(CONFIG_KVM)
4311 xcc->model_description =
4312 "KVM processor with all supported host features ";
4313 #elif defined(CONFIG_HVF)
4314 xcc->model_description =
4315 "HVF processor with all supported host features ";
4316 #endif
4317 }
4318
4319 static const TypeInfo host_x86_cpu_type_info = {
4320 .name = X86_CPU_TYPE_NAME("host"),
4321 .parent = X86_CPU_TYPE_NAME("max"),
4322 .class_init = host_x86_cpu_class_init,
4323 };
4324
4325 #endif
4326
4327 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
4328 {
4329 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
4330
4331 switch (f->type) {
4332 case CPUID_FEATURE_WORD:
4333 {
4334 const char *reg = get_register_name_32(f->cpuid.reg);
4335 assert(reg);
4336 return g_strdup_printf("CPUID.%02XH:%s",
4337 f->cpuid.eax, reg);
4338 }
4339 case MSR_FEATURE_WORD:
4340 return g_strdup_printf("MSR(%02XH)",
4341 f->msr.index);
4342 }
4343
4344 return NULL;
4345 }
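
/*
 * Example return values: a CPUID-based word such as FEAT_7_0_EDX is
 * described as "CPUID.07H:EDX", while an MSR-based word such as
 * FEAT_ARCH_CAPABILITIES (IA32_ARCH_CAPABILITIES, index 0x10A) is
 * described as "MSR(10AH)".
 */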
4346
4347 static bool x86_cpu_have_filtered_features(X86CPU *cpu)
4348 {
4349 FeatureWord w;
4350
4351 for (w = 0; w < FEATURE_WORDS; w++) {
4352 if (cpu->filtered_features[w]) {
4353 return true;
4354 }
4355 }
4356
4357 return false;
4358 }
4359
4360 static void mark_unavailable_features(X86CPU *cpu, FeatureWord w, uint64_t mask,
4361 const char *verbose_prefix)
4362 {
4363 CPUX86State *env = &cpu->env;
4364 FeatureWordInfo *f = &feature_word_info[w];
4365 int i;
4366
4367 if (!cpu->force_features) {
4368 env->features[w] &= ~mask;
4369 }
4370 cpu->filtered_features[w] |= mask;
4371
4372 if (!verbose_prefix) {
4373 return;
4374 }
4375
4376 for (i = 0; i < 64; ++i) {
4377 if ((1ULL << i) & mask) {
4378 g_autofree char *feat_word_str = feature_word_description(f, i);
4379 warn_report("%s: %s%s%s [bit %d]",
4380 verbose_prefix,
4381 feat_word_str,
4382 f->feat_names[i] ? "." : "",
4383 f->feat_names[i] ? f->feat_names[i] : "", i);
4384 }
4385 }
4386 }
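
/*
 * Example of the warning emitted above: if the requested model includes
 * spec-ctrl (CPUID.07H:EDX, bit 26) but the accelerator cannot provide it,
 * the report looks roughly like
 *
 *     warning: <verbose_prefix>: CPUID.07H:EDX.spec-ctrl [bit 26]
 */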
4387
4388 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
4389 const char *name, void *opaque,
4390 Error **errp)
4391 {
4392 X86CPU *cpu = X86_CPU(obj);
4393 CPUX86State *env = &cpu->env;
4394 int64_t value;
4395
4396 value = (env->cpuid_version >> 8) & 0xf;
4397 if (value == 0xf) {
4398 value += (env->cpuid_version >> 20) & 0xff;
4399 }
4400 visit_type_int(v, name, &value, errp);
4401 }
4402
4403 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
4404 const char *name, void *opaque,
4405 Error **errp)
4406 {
4407 X86CPU *cpu = X86_CPU(obj);
4408 CPUX86State *env = &cpu->env;
4409 const int64_t min = 0;
4410 const int64_t max = 0xff + 0xf;
4411 Error *local_err = NULL;
4412 int64_t value;
4413
4414 visit_type_int(v, name, &value, &local_err);
4415 if (local_err) {
4416 error_propagate(errp, local_err);
4417 return;
4418 }
4419 if (value < min || value > max) {
4420 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4421 name ? name : "null", value, min, max);
4422 return;
4423 }
4424
4425 env->cpuid_version &= ~0xff00f00;
4426 if (value > 0x0f) {
4427 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
4428 } else {
4429 env->cpuid_version |= value << 8;
4430 }
4431 }
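
/*
 * Worked example: setting family=23 (0x17, as the EPYC definitions above
 * do) takes the value > 0x0f branch, so the base family field (bits 8-11)
 * becomes 0xF and the extended family field (bits 20-27) becomes
 * 23 - 15 = 8; the getter above then reports 0xF + 8 = 23 again.
 */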
4432
4433 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
4434 const char *name, void *opaque,
4435 Error **errp)
4436 {
4437 X86CPU *cpu = X86_CPU(obj);
4438 CPUX86State *env = &cpu->env;
4439 int64_t value;
4440
4441 value = (env->cpuid_version >> 4) & 0xf;
4442 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
4443 visit_type_int(v, name, &value, errp);
4444 }
4445
4446 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
4447 const char *name, void *opaque,
4448 Error **errp)
4449 {
4450 X86CPU *cpu = X86_CPU(obj);
4451 CPUX86State *env = &cpu->env;
4452 const int64_t min = 0;
4453 const int64_t max = 0xff;
4454 Error *local_err = NULL;
4455 int64_t value;
4456
4457 visit_type_int(v, name, &value, &local_err);
4458 if (local_err) {
4459 error_propagate(errp, local_err);
4460 return;
4461 }
4462 if (value < min || value > max) {
4463 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4464 name ? name : "null", value, min, max);
4465 return;
4466 }
4467
4468 env->cpuid_version &= ~0xf00f0;
4469 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
4470 }
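
/*
 * Worked example: setting model=85 (0x55, as the Cascadelake definitions
 * use) stores the low nibble 0x5 in bits 4-7 and the high nibble 0x5 in
 * the extended model field (bits 16-19); the getter reassembles 0x55 = 85.
 */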
4471
4472 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
4473 const char *name, void *opaque,
4474 Error **errp)
4475 {
4476 X86CPU *cpu = X86_CPU(obj);
4477 CPUX86State *env = &cpu->env;
4478 int64_t value;
4479
4480 value = env->cpuid_version & 0xf;
4481 visit_type_int(v, name, &value, errp);
4482 }
4483
4484 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
4485 const char *name, void *opaque,
4486 Error **errp)
4487 {
4488 X86CPU *cpu = X86_CPU(obj);
4489 CPUX86State *env = &cpu->env;
4490 const int64_t min = 0;
4491 const int64_t max = 0xf;
4492 Error *local_err = NULL;
4493 int64_t value;
4494
4495 visit_type_int(v, name, &value, &local_err);
4496 if (local_err) {
4497 error_propagate(errp, local_err);
4498 return;
4499 }
4500 if (value < min || value > max) {
4501 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4502 name ? name : "null", value, min, max);
4503 return;
4504 }
4505
4506 env->cpuid_version &= ~0xf;
4507 env->cpuid_version |= value & 0xf;
4508 }
4509
4510 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
4511 {
4512 X86CPU *cpu = X86_CPU(obj);
4513 CPUX86State *env = &cpu->env;
4514 char *value;
4515
4516 value = g_malloc(CPUID_VENDOR_SZ + 1);
4517 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
4518 env->cpuid_vendor3);
4519 return value;
4520 }
4521
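/*
 * The 12-character vendor string is packed little-endian into three
 * 32-bit words, e.g. "GenuineIntel" becomes vendor1 = "Genu" (EBX),
 * vendor2 = "ineI" (EDX) and vendor3 = "ntel" (ECX), matching the
 * register order CPUID[0] uses to return it.
 */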
4522 static void x86_cpuid_set_vendor(Object *obj, const char *value,
4523 Error **errp)
4524 {
4525 X86CPU *cpu = X86_CPU(obj);
4526 CPUX86State *env = &cpu->env;
4527 int i;
4528
4529 if (strlen(value) != CPUID_VENDOR_SZ) {
4530 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
4531 return;
4532 }
4533
4534 env->cpuid_vendor1 = 0;
4535 env->cpuid_vendor2 = 0;
4536 env->cpuid_vendor3 = 0;
4537 for (i = 0; i < 4; i++) {
4538 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
4539 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
4540 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
4541 }
4542 }
4543
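/*
 * The model-id string is stored as 48 bytes packed into the twelve
 * 32-bit cpuid_model[] words (NUL padded); CPUID leaves
 * 0x80000002..0x80000004 return it four bytes per register.
 */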
4544 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
4545 {
4546 X86CPU *cpu = X86_CPU(obj);
4547 CPUX86State *env = &cpu->env;
4548 char *value;
4549 int i;
4550
4551 value = g_malloc(48 + 1);
4552 for (i = 0; i < 48; i++) {
4553 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
4554 }
4555 value[48] = '\0';
4556 return value;
4557 }
4558
4559 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
4560 Error **errp)
4561 {
4562 X86CPU *cpu = X86_CPU(obj);
4563 CPUX86State *env = &cpu->env;
4564 int c, len, i;
4565
4566 if (model_id == NULL) {
4567 model_id = "";
4568 }
4569 len = strlen(model_id);
4570 memset(env->cpuid_model, 0, 48);
4571 for (i = 0; i < 48; i++) {
4572 if (i >= len) {
4573 c = '\0';
4574 } else {
4575 c = (uint8_t)model_id[i];
4576 }
4577 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
4578 }
4579 }
4580
4581 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
4582 void *opaque, Error **errp)
4583 {
4584 X86CPU *cpu = X86_CPU(obj);
4585 int64_t value;
4586
4587 value = cpu->env.tsc_khz * 1000;
4588 visit_type_int(v, name, &value, errp);
4589 }
4590
4591 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
4592 void *opaque, Error **errp)
4593 {
4594 X86CPU *cpu = X86_CPU(obj);
4595 const int64_t min = 0;
4596 const int64_t max = INT64_MAX;
4597 Error *local_err = NULL;
4598 int64_t value;
4599
4600 visit_type_int(v, name, &value, &local_err);
4601 if (local_err) {
4602 error_propagate(errp, local_err);
4603 return;
4604 }
4605 if (value < min || value > max) {
4606 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
4607 name ? name : "null", value, min, max);
4608 return;
4609 }
4610
4611 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
4612 }
4613
4614 /* Generic getter for "feature-words" and "filtered-features" properties */
4615 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
4616 const char *name, void *opaque,
4617 Error **errp)
4618 {
4619 uint64_t *array = (uint64_t *)opaque;
4620 FeatureWord w;
4621 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
4622 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
4623 X86CPUFeatureWordInfoList *list = NULL;
4624
4625 for (w = 0; w < FEATURE_WORDS; w++) {
4626 FeatureWordInfo *wi = &feature_word_info[w];
4627 /*
4628 * MSR feature words didn't exist when "feature-words" was
4629 * introduced, so skip entries of any other type.
4630 */
4631 if (wi->type != CPUID_FEATURE_WORD) {
4632 continue;
4633 }
4634 X86CPUFeatureWordInfo *qwi = &word_infos[w];
4635 qwi->cpuid_input_eax = wi->cpuid.eax;
4636 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
4637 qwi->cpuid_input_ecx = wi->cpuid.ecx;
4638 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
4639 qwi->features = array[w];
4640
4641 /* List will be in reverse order, but order shouldn't matter */
4642 list_entries[w].next = list;
4643 list_entries[w].value = &word_infos[w];
4644 list = &list_entries[w];
4645 }
4646
4647 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
4648 }
4649
4650 /* Convert all '_' in a feature string option name to '-', to make the feature
4651 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
4652 */
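/* For example, "lahf_lm" becomes "lahf-lm". */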
4653 static inline void feat2prop(char *s)
4654 {
4655 while ((s = strchr(s, '_'))) {
4656 *s = '-';
4657 }
4658 }
4659
4660 /* Return the feature property name for a feature flag bit */
4661 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
4662 {
4663 const char *name;
4664 /* XSAVE components are automatically enabled by other features,
4665 * so return the original feature name instead
4666 */
4667 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
4668 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
4669
4670 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
4671 x86_ext_save_areas[comp].bits) {
4672 w = x86_ext_save_areas[comp].feature;
4673 bitnr = ctz32(x86_ext_save_areas[comp].bits);
4674 }
4675 }
4676
4677 assert(bitnr < 64);
4678 assert(w < FEATURE_WORDS);
4679 name = feature_word_info[w].feat_names[bitnr];
4680 assert(bitnr < 32 || !(name && feature_word_info[w].type == CPUID_FEATURE_WORD));
4681 return name;
4682 }
4683
4684 /* Compatibility hack to maintain the legacy +-feat semantics,
4685 * where +-feat overrides any feature set by
4686 * feat=on|feat even if the latter is parsed after +-feat
4687 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
4688 */
4689 static GList *plus_features, *minus_features;
4690
4691 static gint compare_string(gconstpointer a, gconstpointer b)
4692 {
4693 return g_strcmp0(a, b);
4694 }
4695
4696 /* Parse "+feature,-feature,feature=foo" CPU feature string
4697 */
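/*
 * Example (sketch): a string such as "+avx2,pdpe1gb=off,tsc-freq=2G"
 * adds "avx2" to plus_features, registers pdpe1gb=off as a global
 * property for @typename, and rewrites tsc-freq=2G into
 * tsc-frequency=2000000000.
 */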
4698 static void x86_cpu_parse_featurestr(const char *typename, char *features,
4699 Error **errp)
4700 {
4701 char *featurestr; /* Single "key=value" string being parsed */
4702 static bool cpu_globals_initialized;
4703 bool ambiguous = false;
4704
4705 if (cpu_globals_initialized) {
4706 return;
4707 }
4708 cpu_globals_initialized = true;
4709
4710 if (!features) {
4711 return;
4712 }
4713
4714 for (featurestr = strtok(features, ",");
4715 featurestr;
4716 featurestr = strtok(NULL, ",")) {
4717 const char *name;
4718 const char *val = NULL;
4719 char *eq = NULL;
4720 char num[32];
4721 GlobalProperty *prop;
4722
4723 /* Compatibility syntax: */
4724 if (featurestr[0] == '+') {
4725 plus_features = g_list_append(plus_features,
4726 g_strdup(featurestr + 1));
4727 continue;
4728 } else if (featurestr[0] == '-') {
4729 minus_features = g_list_append(minus_features,
4730 g_strdup(featurestr + 1));
4731 continue;
4732 }
4733
4734 eq = strchr(featurestr, '=');
4735 if (eq) {
4736 *eq++ = 0;
4737 val = eq;
4738 } else {
4739 val = "on";
4740 }
4741
4742 feat2prop(featurestr);
4743 name = featurestr;
4744
4745 if (g_list_find_custom(plus_features, name, compare_string)) {
4746 warn_report("Ambiguous CPU model string. "
4747 "Don't mix both \"+%s\" and \"%s=%s\"",
4748 name, name, val);
4749 ambiguous = true;
4750 }
4751 if (g_list_find_custom(minus_features, name, compare_string)) {
4752 warn_report("Ambiguous CPU model string. "
4753 "Don't mix both \"-%s\" and \"%s=%s\"",
4754 name, name, val);
4755 ambiguous = true;
4756 }
4757
4758 /* Special case: */
4759 if (!strcmp(name, "tsc-freq")) {
4760 int ret;
4761 uint64_t tsc_freq;
4762
4763 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
4764 if (ret < 0 || tsc_freq > INT64_MAX) {
4765 error_setg(errp, "bad numerical value %s", val);
4766 return;
4767 }
4768 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
4769 val = num;
4770 name = "tsc-frequency";
4771 }
4772
4773 prop = g_new0(typeof(*prop), 1);
4774 prop->driver = typename;
4775 prop->property = g_strdup(name);
4776 prop->value = g_strdup(val);
4777 qdev_prop_register_global(prop);
4778 }
4779
4780 if (ambiguous) {
4781 warn_report("Compatibility of ambiguous CPU model "
4782 "strings won't be kept on future QEMU versions");
4783 }
4784 }
4785
4786 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
4787 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose);
4788
4789 /* Build a list with the name of all features on a feature word array */
4790 static void x86_cpu_list_feature_names(FeatureWordArray features,
4791 strList **feat_names)
4792 {
4793 FeatureWord w;
4794 strList **next = feat_names;
4795
4796 for (w = 0; w < FEATURE_WORDS; w++) {
4797 uint64_t filtered = features[w];
4798 int i;
4799 for (i = 0; i < 64; i++) {
4800 if (filtered & (1ULL << i)) {
4801 strList *new = g_new0(strList, 1);
4802 new->value = g_strdup(x86_cpu_feature_name(w, i));
4803 *next = new;
4804 next = &new->next;
4805 }
4806 }
4807 }
4808 }
4809
4810 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
4811 const char *name, void *opaque,
4812 Error **errp)
4813 {
4814 X86CPU *xc = X86_CPU(obj);
4815 strList *result = NULL;
4816
4817 x86_cpu_list_feature_names(xc->filtered_features, &result);
4818 visit_type_strList(v, "unavailable-features", &result, errp);
4819 }
4820
4821 /* Check for missing features that may prevent the CPU class from
4822 * running using the current machine and accelerator.
4823 */
4824 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
4825 strList **missing_feats)
4826 {
4827 X86CPU *xc;
4828 Error *err = NULL;
4829 strList **next = missing_feats;
4830
4831 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4832 strList *new = g_new0(strList, 1);
4833 new->value = g_strdup("kvm");
4834 *missing_feats = new;
4835 return;
4836 }
4837
4838 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
4839
4840 x86_cpu_expand_features(xc, &err);
4841 if (err) {
4842 /* Errors at x86_cpu_expand_features should never happen,
4843 * but in case they do, just report the model as not
4844 * runnable at all using the "type" property.
4845 */
4846 strList *new = g_new0(strList, 1);
4847 new->value = g_strdup("type");
4848 *next = new;
4849 next = &new->next;
4850 }
4851
4852 x86_cpu_filter_features(xc, false);
4853
4854 x86_cpu_list_feature_names(xc->filtered_features, next);
4855
4856 object_unref(OBJECT(xc));
4857 }
4858
4859 /* Print all CPUID feature names in the feature list
4860 */
4861 static void listflags(GList *features)
4862 {
4863 size_t len = 0;
4864 GList *tmp;
4865
4866 for (tmp = features; tmp; tmp = tmp->next) {
4867 const char *name = tmp->data;
4868 if ((len + strlen(name) + 1) >= 75) {
4869 qemu_printf("\n");
4870 len = 0;
4871 }
4872 qemu_printf("%s%s", len == 0 ? " " : " ", name);
4873 len += strlen(name) + 1;
4874 }
4875 qemu_printf("\n");
4876 }
4877
4878 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
4879 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
4880 {
4881 ObjectClass *class_a = (ObjectClass *)a;
4882 ObjectClass *class_b = (ObjectClass *)b;
4883 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
4884 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
4885 int ret;
4886
4887 if (cc_a->ordering != cc_b->ordering) {
4888 ret = cc_a->ordering - cc_b->ordering;
4889 } else {
4890 g_autofree char *name_a = x86_cpu_class_get_model_name(cc_a);
4891 g_autofree char *name_b = x86_cpu_class_get_model_name(cc_b);
4892 ret = strcmp(name_a, name_b);
4893 }
4894 return ret;
4895 }
4896
4897 static GSList *get_sorted_cpu_model_list(void)
4898 {
4899 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
4900 list = g_slist_sort(list, x86_cpu_list_compare);
4901 return list;
4902 }
4903
4904 static char *x86_cpu_class_get_model_id(X86CPUClass *xc)
4905 {
4906 Object *obj = object_new_with_class(OBJECT_CLASS(xc));
4907 char *r = object_property_get_str(obj, "model-id", &error_abort);
4908 object_unref(obj);
4909 return r;
4910 }
4911
4912 static char *x86_cpu_class_get_alias_of(X86CPUClass *cc)
4913 {
4914 X86CPUVersion version;
4915
4916 if (!cc->model || !cc->model->is_alias) {
4917 return NULL;
4918 }
4919 version = x86_cpu_model_resolve_version(cc->model);
4920 if (version <= 0) {
4921 return NULL;
4922 }
4923 return x86_cpu_versioned_model_name(cc->model->cpudef, version);
4924 }
4925
4926 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
4927 {
4928 ObjectClass *oc = data;
4929 X86CPUClass *cc = X86_CPU_CLASS(oc);
4930 g_autofree char *name = x86_cpu_class_get_model_name(cc);
4931 g_autofree char *desc = g_strdup(cc->model_description);
4932 g_autofree char *alias_of = x86_cpu_class_get_alias_of(cc);
4933 g_autofree char *model_id = x86_cpu_class_get_model_id(cc);
4934
4935 if (!desc && alias_of) {
4936 if (cc->model && cc->model->version == CPU_VERSION_AUTO) {
4937 desc = g_strdup("(alias configured by machine type)");
4938 } else {
4939 desc = g_strdup_printf("(alias of %s)", alias_of);
4940 }
4941 }
4942 if (!desc && cc->model && cc->model->note) {
4943 desc = g_strdup_printf("%s [%s]", model_id, cc->model->note);
4944 }
4945 if (!desc) {
4946 desc = g_strdup_printf("%s", model_id);
4947 }
4948
4949 qemu_printf("x86 %-20s %-58s\n", name, desc);
4950 }
4951
4952 /* list available CPU models and flags */
4953 void x86_cpu_list(void)
4954 {
4955 int i, j;
4956 GSList *list;
4957 GList *names = NULL;
4958
4959 qemu_printf("Available CPUs:\n");
4960 list = get_sorted_cpu_model_list();
4961 g_slist_foreach(list, x86_cpu_list_entry, NULL);
4962 g_slist_free(list);
4963
4964 names = NULL;
4965 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
4966 FeatureWordInfo *fw = &feature_word_info[i];
4967 for (j = 0; j < 64; j++) {
4968 if (fw->feat_names[j]) {
4969 names = g_list_append(names, (gpointer)fw->feat_names[j]);
4970 }
4971 }
4972 }
4973
4974 names = g_list_sort(names, (GCompareFunc)strcmp);
4975
4976 qemu_printf("\nRecognized CPUID flags:\n");
4977 listflags(names);
4978 qemu_printf("\n");
4979 g_list_free(names);
4980 }
4981
4982 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
4983 {
4984 ObjectClass *oc = data;
4985 X86CPUClass *cc = X86_CPU_CLASS(oc);
4986 CpuDefinitionInfoList **cpu_list = user_data;
4987 CpuDefinitionInfoList *entry;
4988 CpuDefinitionInfo *info;
4989
4990 info = g_malloc0(sizeof(*info));
4991 info->name = x86_cpu_class_get_model_name(cc);
4992 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
4993 info->has_unavailable_features = true;
4994 info->q_typename = g_strdup(object_class_get_name(oc));
4995 info->migration_safe = cc->migration_safe;
4996 info->has_migration_safe = true;
4997 info->q_static = cc->static_model;
4998 /*
4999 * Old machine types won't report aliases, so that alias translation
5000 * doesn't break compatibility with previous QEMU versions.
5001 */
5002 if (default_cpu_version != CPU_VERSION_LEGACY) {
5003 info->alias_of = x86_cpu_class_get_alias_of(cc);
5004 info->has_alias_of = !!info->alias_of;
5005 }
5006
5007 entry = g_malloc0(sizeof(*entry));
5008 entry->value = info;
5009 entry->next = *cpu_list;
5010 *cpu_list = entry;
5011 }
5012
5013 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
5014 {
5015 CpuDefinitionInfoList *cpu_list = NULL;
5016 GSList *list = get_sorted_cpu_model_list();
5017 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
5018 g_slist_free(list);
5019 return cpu_list;
5020 }
5021
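/*
 * Return the bits of feature word @w that the current accelerator can
 * support: KVM and HVF are queried directly, TCG uses the static
 * tcg_features masks, and any other configuration reports all bits set.
 */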
5022 static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
5023 bool migratable_only)
5024 {
5025 FeatureWordInfo *wi = &feature_word_info[w];
5026 uint64_t r = 0;
5027
5028 if (kvm_enabled()) {
5029 switch (wi->type) {
5030 case CPUID_FEATURE_WORD:
5031 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
5032 wi->cpuid.ecx,
5033 wi->cpuid.reg);
5034 break;
5035 case MSR_FEATURE_WORD:
5036 r = kvm_arch_get_supported_msr_feature(kvm_state,
5037 wi->msr.index);
5038 break;
5039 }
5040 } else if (hvf_enabled()) {
5041 if (wi->type != CPUID_FEATURE_WORD) {
5042 return 0;
5043 }
5044 r = hvf_get_supported_cpuid(wi->cpuid.eax,
5045 wi->cpuid.ecx,
5046 wi->cpuid.reg);
5047 } else if (tcg_enabled()) {
5048 r = wi->tcg_features;
5049 } else {
5050 return ~0;
5051 }
5052 if (migratable_only) {
5053 r &= x86_cpu_get_migratable_flags(w);
5054 }
5055 return r;
5056 }
5057
5058 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
5059 {
5060 PropValue *pv;
5061 for (pv = props; pv->prop; pv++) {
5062 if (!pv->value) {
5063 continue;
5064 }
5065 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
5066 &error_abort);
5067 }
5068 }
5069
5070 /* Apply properties for the CPU model version specified in model */
5071 static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
5072 {
5073 const X86CPUVersionDefinition *vdef;
5074 X86CPUVersion version = x86_cpu_model_resolve_version(model);
5075
5076 if (version == CPU_VERSION_LEGACY) {
5077 return;
5078 }
5079
5080 for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) {
5081 PropValue *p;
5082
5083 for (p = vdef->props; p && p->prop; p++) {
5084 object_property_parse(OBJECT(cpu), p->value, p->prop,
5085 &error_abort);
5086 }
5087
5088 if (vdef->version == version) {
5089 break;
5090 }
5091 }
5092
5093 /*
5094 * If we reached the end of the list, version number was invalid
5095 */
5096 assert(vdef->version == version);
5097 }
5098
5099 /* Load data from X86CPUDefinition into a X86CPU object
5100 */
5101 static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model)
5102 {
5103 X86CPUDefinition *def = model->cpudef;
5104 CPUX86State *env = &cpu->env;
5105 const char *vendor;
5106 char host_vendor[CPUID_VENDOR_SZ + 1];
5107 FeatureWord w;
5108
5109 /* NOTE: any property set by this function should be returned by
5110 * x86_cpu_static_props(), so static expansion of
5111 * query-cpu-model-expansion is always complete.
5112 */
5113
5114 /* CPU models only set _minimum_ values for level/xlevel: */
5115 object_property_set_uint(OBJECT(cpu), def->level, "min-level",
5116 &error_abort);
5117 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel",
5118 &error_abort);
5119
5120 object_property_set_int(OBJECT(cpu), def->family, "family",
5121 &error_abort);
5122 object_property_set_int(OBJECT(cpu), def->model, "model",
5123 &error_abort);
5124 object_property_set_int(OBJECT(cpu), def->stepping, "stepping",
5125 &error_abort);
5126 object_property_set_str(OBJECT(cpu), def->model_id, "model-id",
5127 &error_abort);
5128 for (w = 0; w < FEATURE_WORDS; w++) {
5129 env->features[w] = def->features[w];
5130 }
5131
5132 /* legacy-cache defaults to 'off' if CPU model provides cache info */
5133 cpu->legacy_cache = !def->cache_info;
5134
5135 /* Special cases not set in the X86CPUDefinition structs: */
5136 /* TODO: in-kernel irqchip for hvf */
5137 if (kvm_enabled()) {
5138 if (!kvm_irqchip_in_kernel()) {
5139 x86_cpu_change_kvm_default("x2apic", "off");
5140 }
5141
5142 x86_cpu_apply_props(cpu, kvm_default_props);
5143 } else if (tcg_enabled()) {
5144 x86_cpu_apply_props(cpu, tcg_default_props);
5145 }
5146
5147 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
5148
5149 /* sysenter isn't supported in compatibility mode on AMD,
5150 * syscall isn't supported in compatibility mode on Intel.
5151 * Normally we advertise the actual CPU vendor, but you can
5152 * override this using the 'vendor' property if you want to use
5153 * KVM's sysenter/syscall emulation in compatibility mode and
5154 * when doing cross-vendor migration.
5155 */
5156 vendor = def->vendor;
5157 if (accel_uses_host_cpuid()) {
5158 uint32_t ebx = 0, ecx = 0, edx = 0;
5159 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
5160 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
5161 vendor = host_vendor;
5162 }
5163
5164 object_property_set_str(OBJECT(cpu), vendor, "vendor",
5165 &error_abort);
5166
5167 x86_cpu_apply_version_props(cpu, model);
5168 }
5169
5170 #ifndef CONFIG_USER_ONLY
5171 /* Return a QDict containing keys for all properties that can be included
5172 * in static expansion of CPU models. All properties set by x86_cpu_load_model()
5173 * must be included in the dictionary.
5174 */
5175 static QDict *x86_cpu_static_props(void)
5176 {
5177 FeatureWord w;
5178 int i;
5179 static const char *props[] = {
5180 "min-level",
5181 "min-xlevel",
5182 "family",
5183 "model",
5184 "stepping",
5185 "model-id",
5186 "vendor",
5187 "lmce",
5188 NULL,
5189 };
5190 static QDict *d;
5191
5192 if (d) {
5193 return d;
5194 }
5195
5196 d = qdict_new();
5197 for (i = 0; props[i]; i++) {
5198 qdict_put_null(d, props[i]);
5199 }
5200
5201 for (w = 0; w < FEATURE_WORDS; w++) {
5202 FeatureWordInfo *fi = &feature_word_info[w];
5203 int bit;
5204 for (bit = 0; bit < 64; bit++) {
5205 if (!fi->feat_names[bit]) {
5206 continue;
5207 }
5208 qdict_put_null(d, fi->feat_names[bit]);
5209 }
5210 }
5211
5212 return d;
5213 }
5214
5215 /* Add an entry to @props dict, with the value for property. */
5216 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
5217 {
5218 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
5219 &error_abort);
5220
5221 qdict_put_obj(props, prop, value);
5222 }
5223
5224 /* Convert CPU model data from X86CPU object to a property dictionary
5225 * that can recreate exactly the same CPU model.
5226 */
5227 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
5228 {
5229 QDict *sprops = x86_cpu_static_props();
5230 const QDictEntry *e;
5231
5232 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
5233 const char *prop = qdict_entry_key(e);
5234 x86_cpu_expand_prop(cpu, props, prop);
5235 }
5236 }
5237
5238 /* Convert CPU model data from X86CPU object to a property dictionary
5239 * that can recreate exactly the same CPU model, including every
5240 * writeable QOM property.
5241 */
5242 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
5243 {
5244 ObjectPropertyIterator iter;
5245 ObjectProperty *prop;
5246
5247 object_property_iter_init(&iter, OBJECT(cpu));
5248 while ((prop = object_property_iter_next(&iter))) {
5249 /* skip read-only or write-only properties */
5250 if (!prop->get || !prop->set) {
5251 continue;
5252 }
5253
5254 /* "hotplugged" is the only property that is configurable
5255 * on the command-line but will be set differently on CPUs
5256 * created using "-cpu ... -smp ..." and by CPUs created
5257 * on the fly by x86_cpu_from_model() for querying. Skip it.
5258 */
5259 if (!strcmp(prop->name, "hotplugged")) {
5260 continue;
5261 }
5262 x86_cpu_expand_prop(cpu, props, prop->name);
5263 }
5264 }
5265
5266 static void object_apply_props(Object *obj, QDict *props, Error **errp)
5267 {
5268 const QDictEntry *prop;
5269 Error *err = NULL;
5270
5271 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
5272 object_property_set_qobject(obj, qdict_entry_value(prop),
5273 qdict_entry_key(prop), &err);
5274 if (err) {
5275 break;
5276 }
5277 }
5278
5279 error_propagate(errp, err);
5280 }
5281
5282 /* Create X86CPU object according to model+props specification */
5283 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
5284 {
5285 X86CPU *xc = NULL;
5286 X86CPUClass *xcc;
5287 Error *err = NULL;
5288
5289 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
5290 if (xcc == NULL) {
5291 error_setg(&err, "CPU model '%s' not found", model);
5292 goto out;
5293 }
5294
5295 xc = X86_CPU(object_new_with_class(OBJECT_CLASS(xcc)));
5296 if (props) {
5297 object_apply_props(OBJECT(xc), props, &err);
5298 if (err) {
5299 goto out;
5300 }
5301 }
5302
5303 x86_cpu_expand_features(xc, &err);
5304 if (err) {
5305 goto out;
5306 }
5307
5308 out:
5309 if (err) {
5310 error_propagate(errp, err);
5311 object_unref(OBJECT(xc));
5312 xc = NULL;
5313 }
5314 return xc;
5315 }
5316
5317 CpuModelExpansionInfo *
5318 qmp_query_cpu_model_expansion(CpuModelExpansionType type,
5319 CpuModelInfo *model,
5320 Error **errp)
5321 {
5322 X86CPU *xc = NULL;
5323 Error *err = NULL;
5324 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
5325 QDict *props = NULL;
5326 const char *base_name;
5327
5328 xc = x86_cpu_from_model(model->name,
5329 model->has_props ?
5330 qobject_to(QDict, model->props) :
5331 NULL, &err);
5332 if (err) {
5333 goto out;
5334 }
5335
5336 props = qdict_new();
5337 ret->model = g_new0(CpuModelInfo, 1);
5338 ret->model->props = QOBJECT(props);
5339 ret->model->has_props = true;
5340
5341 switch (type) {
5342 case CPU_MODEL_EXPANSION_TYPE_STATIC:
5343 /* Static expansion will be based on "base" only */
5344 base_name = "base";
5345 x86_cpu_to_dict(xc, props);
5346 break;
5347 case CPU_MODEL_EXPANSION_TYPE_FULL:
5348 /* As we don't return every single property, full expansion needs
5349 * to keep the original model name+props, and add extra
5350 * properties on top of that.
5351 */
5352 base_name = model->name;
5353 x86_cpu_to_dict_full(xc, props);
5354 break;
5355 default:
5356 error_setg(&err, "Unsupported expansion type");
5357 goto out;
5358 }
5359
5360 x86_cpu_to_dict(xc, props);
5361
5362 ret->model->name = g_strdup(base_name);
5363
5364 out:
5365 object_unref(OBJECT(xc));
5366 if (err) {
5367 error_propagate(errp, err);
5368 qapi_free_CpuModelExpansionInfo(ret);
5369 ret = NULL;
5370 }
5371 return ret;
5372 }
5373 #endif /* !CONFIG_USER_ONLY */
5374
5375 static gchar *x86_gdb_arch_name(CPUState *cs)
5376 {
5377 #ifdef TARGET_X86_64
5378 return g_strdup("i386:x86-64");
5379 #else
5380 return g_strdup("i386");
5381 #endif
5382 }
5383
5384 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
5385 {
5386 X86CPUModel *model = data;
5387 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5388
5389 xcc->model = model;
5390 xcc->migration_safe = true;
5391 }
5392
5393 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model)
5394 {
5395 g_autofree char *typename = x86_cpu_type_name(name);
5396 TypeInfo ti = {
5397 .name = typename,
5398 .parent = TYPE_X86_CPU,
5399 .class_init = x86_cpu_cpudef_class_init,
5400 .class_data = model,
5401 };
5402
5403 type_register(&ti);
5404 }
5405
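/*
 * For example, registering the "Skylake-Client" definition creates the
 * unversioned alias type plus versioned types such as "Skylake-Client-v1"
 * and "Skylake-Client-v2", one per entry in its version table, plus any
 * per-version alias names.
 */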
5406 static void x86_register_cpudef_types(X86CPUDefinition *def)
5407 {
5408 X86CPUModel *m;
5409 const X86CPUVersionDefinition *vdef;
5410
5411 /* AMD aliases are handled at runtime based on CPUID vendor, so
5412 * they shouldn't be set on the CPU model table.
5413 */
5414 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
5415 /* catch mistakes instead of silently truncating model_id when too long */
5416 assert(def->model_id && strlen(def->model_id) <= 48);
5417
5418 /* Unversioned model: */
5419 m = g_new0(X86CPUModel, 1);
5420 m->cpudef = def;
5421 m->version = CPU_VERSION_AUTO;
5422 m->is_alias = true;
5423 x86_register_cpu_model_type(def->name, m);
5424
5425 /* Versioned models: */
5426
5427 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
5428 X86CPUModel *m = g_new0(X86CPUModel, 1);
5429 g_autofree char *name =
5430 x86_cpu_versioned_model_name(def, vdef->version);
5431 m->cpudef = def;
5432 m->version = vdef->version;
5433 m->note = vdef->note;
5434 x86_register_cpu_model_type(name, m);
5435
5436 if (vdef->alias) {
5437 X86CPUModel *am = g_new0(X86CPUModel, 1);
5438 am->cpudef = def;
5439 am->version = vdef->version;
5440 am->is_alias = true;
5441 x86_register_cpu_model_type(vdef->alias, am);
5442 }
5443 }
5444
5445 }
5446
5447 #if !defined(CONFIG_USER_ONLY)
5448
5449 void cpu_clear_apic_feature(CPUX86State *env)
5450 {
5451 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
5452 }
5453
5454 #endif /* !CONFIG_USER_ONLY */
5455
5456 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
5457 uint32_t *eax, uint32_t *ebx,
5458 uint32_t *ecx, uint32_t *edx)
5459 {
5460 X86CPU *cpu = env_archcpu(env);
5461 CPUState *cs = env_cpu(env);
5462 uint32_t die_offset;
5463 uint32_t limit;
5464 uint32_t signature[3];
5465 X86CPUTopoInfo topo_info;
5466
5467 topo_info.nodes_per_pkg = env->nr_nodes;
5468 topo_info.dies_per_pkg = env->nr_dies;
5469 topo_info.cores_per_die = cs->nr_cores;
5470 topo_info.threads_per_core = cs->nr_threads;
5471
5472 /* Calculate & apply limits for different index ranges */
5473 if (index >= 0xC0000000) {
5474 limit = env->cpuid_xlevel2;
5475 } else if (index >= 0x80000000) {
5476 limit = env->cpuid_xlevel;
5477 } else if (index >= 0x40000000) {
5478 limit = 0x40000001;
5479 } else {
5480 limit = env->cpuid_level;
5481 }
5482
5483 if (index > limit) {
5484 /* Intel documentation states that invalid EAX input will
5485 * return the same information as EAX=cpuid_level
5486 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
5487 */
5488 index = env->cpuid_level;
5489 }
5490
5491 switch(index) {
5492 case 0:
5493 *eax = env->cpuid_level;
5494 *ebx = env->cpuid_vendor1;
5495 *edx = env->cpuid_vendor2;
5496 *ecx = env->cpuid_vendor3;
5497 break;
5498 case 1:
5499 *eax = env->cpuid_version;
5500 *ebx = (cpu->apic_id << 24) |
5501 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
5502 *ecx = env->features[FEAT_1_ECX];
5503 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
5504 *ecx |= CPUID_EXT_OSXSAVE;
5505 }
5506 *edx = env->features[FEAT_1_EDX];
5507 if (cs->nr_cores * cs->nr_threads > 1) {
5508 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
5509 *edx |= CPUID_HT;
5510 }
5511 if (!cpu->enable_pmu) {
5512 *ecx &= ~CPUID_EXT_PDCM;
5513 }
5514 break;
5515 case 2:
5516 /* cache info: needed for Pentium Pro compatibility */
5517 if (cpu->cache_info_passthrough) {
5518 host_cpuid(index, 0, eax, ebx, ecx, edx);
5519 break;
5520 }
5521 *eax = 1; /* Number of CPUID[EAX=2] calls required */
5522 *ebx = 0;
5523 if (!cpu->enable_l3_cache) {
5524 *ecx = 0;
5525 } else {
5526 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
5527 }
5528 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
5529 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
5530 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
5531 break;
5532 case 4:
5533 /* cache info: needed for Core compatibility */
5534 if (cpu->cache_info_passthrough) {
5535 host_cpuid(index, count, eax, ebx, ecx, edx);
5536 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
5537 *eax &= ~0xFC000000;
5538 if ((*eax & 31) && cs->nr_cores > 1) {
5539 *eax |= (cs->nr_cores - 1) << 26;
5540 }
5541 } else {
5542 *eax = 0;
5543 switch (count) {
5544 case 0: /* L1 dcache info */
5545 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
5546 1, cs->nr_cores,
5547 eax, ebx, ecx, edx);
5548 break;
5549 case 1: /* L1 icache info */
5550 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
5551 1, cs->nr_cores,
5552 eax, ebx, ecx, edx);
5553 break;
5554 case 2: /* L2 cache info */
5555 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
5556 cs->nr_threads, cs->nr_cores,
5557 eax, ebx, ecx, edx);
5558 break;
5559 case 3: /* L3 cache info */
5560 die_offset = apicid_die_offset(&topo_info);
5561 if (cpu->enable_l3_cache) {
5562 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
5563 (1 << die_offset), cs->nr_cores,
5564 eax, ebx, ecx, edx);
5565 break;
5566 }
5567 /* fall through */
5568 default: /* end of info */
5569 *eax = *ebx = *ecx = *edx = 0;
5570 break;
5571 }
5572 }
5573 break;
5574 case 5:
5575 /* MONITOR/MWAIT Leaf */
5576 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
5577 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
5578 *ecx = cpu->mwait.ecx; /* flags */
5579 *edx = cpu->mwait.edx; /* mwait substates */
5580 break;
5581 case 6:
5582 /* Thermal and Power Leaf */
5583 *eax = env->features[FEAT_6_EAX];
5584 *ebx = 0;
5585 *ecx = 0;
5586 *edx = 0;
5587 break;
5588 case 7:
5589 /* Structured Extended Feature Flags Enumeration Leaf */
5590 if (count == 0) {
5591 /* Maximum ECX value for sub-leaves */
5592 *eax = env->cpuid_level_func7;
5593 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
5594 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
5595 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
5596 *ecx |= CPUID_7_0_ECX_OSPKE;
5597 }
5598 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
5599 } else if (count == 1) {
5600 *eax = env->features[FEAT_7_1_EAX];
5601 *ebx = 0;
5602 *ecx = 0;
5603 *edx = 0;
5604 } else {
5605 *eax = 0;
5606 *ebx = 0;
5607 *ecx = 0;
5608 *edx = 0;
5609 }
5610 break;
5611 case 9:
5612 /* Direct Cache Access Information Leaf */
5613 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
5614 *ebx = 0;
5615 *ecx = 0;
5616 *edx = 0;
5617 break;
5618 case 0xA:
5619 /* Architectural Performance Monitoring Leaf */
5620 if (kvm_enabled() && cpu->enable_pmu) {
5621 KVMState *s = cs->kvm_state;
5622
5623 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
5624 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
5625 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
5626 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
5627 } else if (hvf_enabled() && cpu->enable_pmu) {
5628 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
5629 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
5630 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
5631 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
5632 } else {
5633 *eax = 0;
5634 *ebx = 0;
5635 *ecx = 0;
5636 *edx = 0;
5637 }
5638 break;
5639 case 0xB:
5640 /* Extended Topology Enumeration Leaf */
5641 if (!cpu->enable_cpuid_0xb) {
5642 *eax = *ebx = *ecx = *edx = 0;
5643 break;
5644 }
5645
5646 *ecx = count & 0xff;
5647 *edx = cpu->apic_id;
5648
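/*
 * For each sub-leaf: EAX[4:0] is the number of APIC-ID bits to shift
 * right to reach the next topology level, EBX[15:0] the number of
 * logical processors at this level, and ECX[15:8] the level type.
 */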
5649 switch (count) {
5650 case 0:
5651 *eax = apicid_core_offset(&topo_info);
5652 *ebx = cs->nr_threads;
5653 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
5654 break;
5655 case 1:
5656 *eax = env->pkg_offset;
5657 *ebx = cs->nr_cores * cs->nr_threads;
5658 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
5659 break;
5660 default:
5661 *eax = 0;
5662 *ebx = 0;
5663 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
5664 }
5665
5666 assert(!(*eax & ~0x1f));
5667 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
5668 break;
5669 case 0x1F:
5670 /* V2 Extended Topology Enumeration Leaf */
5671 if (env->nr_dies < 2) {
5672 *eax = *ebx = *ecx = *edx = 0;
5673 break;
5674 }
5675
5676 *ecx = count & 0xff;
5677 *edx = cpu->apic_id;
5678 switch (count) {
5679 case 0:
5680 *eax = apicid_core_offset(&topo_info);
5681 *ebx = cs->nr_threads;
5682 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
5683 break;
5684 case 1:
5685 *eax = apicid_die_offset(&topo_info);
5686 *ebx = cs->nr_cores * cs->nr_threads;
5687 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
5688 break;
5689 case 2:
5690 *eax = env->pkg_offset;
5691 *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
5692 *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
5693 break;
5694 default:
5695 *eax = 0;
5696 *ebx = 0;
5697 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
5698 }
5699 assert(!(*eax & ~0x1f));
5700 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
5701 break;
5702 case 0xD: {
5703 /* Processor Extended State */
5704 *eax = 0;
5705 *ebx = 0;
5706 *ecx = 0;
5707 *edx = 0;
5708 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
5709 break;
5710 }
5711
5712 if (count == 0) {
5713 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
5714 *eax = env->features[FEAT_XSAVE_COMP_LO];
5715 *edx = env->features[FEAT_XSAVE_COMP_HI];
5716 /*
5717 * The initial value of xcr0 and ebx is 0. On a host whose kernel
5718 * lacks kvm commit 412a3c41 (e.g., CentOS 6), ebx stays 0 even
5719 * after the guest updates xcr0, which crashes some legacy guests
5720 * (e.g., CentOS 6). Set ebx == ecx to work around it.
5721 */
5722 *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0);
5723 } else if (count == 1) {
5724 *eax = env->features[FEAT_XSAVE];
5725 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
5726 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
5727 const ExtSaveArea *esa = &x86_ext_save_areas[count];
5728 *eax = esa->size;
5729 *ebx = esa->offset;
5730 }
5731 }
5732 break;
5733 }
5734 case 0x14: {
5735 /* Intel Processor Trace Enumeration */
5736 *eax = 0;
5737 *ebx = 0;
5738 *ecx = 0;
5739 *edx = 0;
5740 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
5741 !kvm_enabled()) {
5742 break;
5743 }
5744
5745 if (count == 0) {
5746 *eax = INTEL_PT_MAX_SUBLEAF;
5747 *ebx = INTEL_PT_MINIMAL_EBX;
5748 *ecx = INTEL_PT_MINIMAL_ECX;
5749 } else if (count == 1) {
5750 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
5751 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
5752 }
5753 break;
5754 }
5755 case 0x40000000:
5756 /*
5757 * CPUID code in kvm_arch_init_vcpu() ignores stuff
5758 * set here, but we restrict this to TCG nonetheless.
5759 */
5760 if (tcg_enabled() && cpu->expose_tcg) {
5761 memcpy(signature, "TCGTCGTCGTCG", 12);
5762 *eax = 0x40000001;
5763 *ebx = signature[0];
5764 *ecx = signature[1];
5765 *edx = signature[2];
5766 } else {
5767 *eax = 0;
5768 *ebx = 0;
5769 *ecx = 0;
5770 *edx = 0;
5771 }
5772 break;
5773 case 0x40000001:
5774 *eax = 0;
5775 *ebx = 0;
5776 *ecx = 0;
5777 *edx = 0;
5778 break;
5779 case 0x80000000:
5780 *eax = env->cpuid_xlevel;
5781 *ebx = env->cpuid_vendor1;
5782 *edx = env->cpuid_vendor2;
5783 *ecx = env->cpuid_vendor3;
5784 break;
5785 case 0x80000001:
5786 *eax = env->cpuid_version;
5787 *ebx = 0;
5788 *ecx = env->features[FEAT_8000_0001_ECX];
5789 *edx = env->features[FEAT_8000_0001_EDX];
5790
5791 /* The Linux kernel checks for the CMPLegacy bit and
5792 * discards multiple thread information if it is set.
5793 * So don't set it here for Intel to make Linux guests happy.
5794 */
5795 if (cs->nr_cores * cs->nr_threads > 1) {
5796 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
5797 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
5798 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
5799 *ecx |= 1 << 1; /* CmpLegacy bit */
5800 }
5801 }
5802 break;
5803 case 0x80000002:
5804 case 0x80000003:
5805 case 0x80000004:
5806 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
5807 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
5808 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
5809 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
5810 break;
5811 case 0x80000005:
5812 /* cache info (L1 cache) */
5813 if (cpu->cache_info_passthrough) {
5814 host_cpuid(index, 0, eax, ebx, ecx, edx);
5815 break;
5816 }
5817 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) |
5818 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
5819 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) |
5820 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
5821 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
5822 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
5823 break;
5824 case 0x80000006:
5825 /* cache info (L2 cache) */
5826 if (cpu->cache_info_passthrough) {
5827 host_cpuid(index, 0, eax, ebx, ecx, edx);
5828 break;
5829 }
5830 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) |
5831 (L2_DTLB_2M_ENTRIES << 16) |
5832 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) |
5833 (L2_ITLB_2M_ENTRIES);
5834 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) |
5835 (L2_DTLB_4K_ENTRIES << 16) |
5836 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) |
5837 (L2_ITLB_4K_ENTRIES);
5838 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
5839 cpu->enable_l3_cache ?
5840 env->cache_info_amd.l3_cache : NULL,
5841 ecx, edx);
5842 break;
5843 case 0x80000007:
5844 *eax = 0;
5845 *ebx = 0;
5846 *ecx = 0;
5847 *edx = env->features[FEAT_8000_0007_EDX];
5848 break;
5849 case 0x80000008:
5850 /* virtual & phys address size in low 2 bytes. */
5851 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
5852 /* 64 bit processor */
5853 *eax = cpu->phys_bits; /* configurable physical bits */
5854 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
5855 *eax |= 0x00003900; /* 57 bits virtual */
5856 } else {
5857 *eax |= 0x00003000; /* 48 bits virtual */
5858 }
5859 } else {
5860 *eax = cpu->phys_bits;
5861 }
5862 *ebx = env->features[FEAT_8000_0008_EBX];
5863 if (cs->nr_cores * cs->nr_threads > 1) {
5864 /*
5865 * Bits 15:12 is "The number of bits in the initial
5866 * Core::X86::Apic::ApicId[ApicId] value that indicate
5867 * thread ID within a package". This is already stored at
5868 * CPUX86State::pkg_offset.
5869 * Bits 7:0 is "The number of threads in the package is NC+1"
5870 */
5871 *ecx = (env->pkg_offset << 12) |
5872 ((cs->nr_cores * cs->nr_threads) - 1);
5873 } else {
5874 *ecx = 0;
5875 }
5876 *edx = 0;
5877 break;
5878 case 0x8000000A:
5879 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
5880 *eax = 0x00000001; /* SVM Revision */
5881 *ebx = 0x00000010; /* nr of ASIDs */
5882 *ecx = 0;
5883 *edx = env->features[FEAT_SVM]; /* optional features */
5884 } else {
5885 *eax = 0;
5886 *ebx = 0;
5887 *ecx = 0;
5888 *edx = 0;
5889 }
5890 break;
5891 case 0x8000001D:
5892 *eax = 0;
5893 if (cpu->cache_info_passthrough) {
5894 host_cpuid(index, count, eax, ebx, ecx, edx);
5895 break;
5896 }
5897 switch (count) {
5898 case 0: /* L1 dcache info */
5899 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache,
5900 &topo_info, eax, ebx, ecx, edx);
5901 break;
5902 case 1: /* L1 icache info */
5903 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache,
5904 &topo_info, eax, ebx, ecx, edx);
5905 break;
5906 case 2: /* L2 cache info */
5907 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache,
5908 &topo_info, eax, ebx, ecx, edx);
5909 break;
5910 case 3: /* L3 cache info */
5911 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache,
5912 &topo_info, eax, ebx, ecx, edx);
5913 break;
5914 default: /* end of info */
5915 *eax = *ebx = *ecx = *edx = 0;
5916 break;
5917 }
5918 break;
5919 case 0x8000001E:
5920 assert(cpu->core_id <= 255);
5921 encode_topo_cpuid8000001e(&topo_info, cpu, eax, ebx, ecx, edx);
5922 break;
5923 case 0xC0000000:
5924 *eax = env->cpuid_xlevel2;
5925 *ebx = 0;
5926 *ecx = 0;
5927 *edx = 0;
5928 break;
5929 case 0xC0000001:
5930 /* Support for VIA CPUs' CPUID instruction */
5931 *eax = env->cpuid_version;
5932 *ebx = 0;
5933 *ecx = 0;
5934 *edx = env->features[FEAT_C000_0001_EDX];
5935 break;
5936 case 0xC0000002:
5937 case 0xC0000003:
5938 case 0xC0000004:
5939 /* Reserved for future use; filled with zero for now */
5940 *eax = 0;
5941 *ebx = 0;
5942 *ecx = 0;
5943 *edx = 0;
5944 break;
5945 case 0x8000001F:
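/* EAX bit 1 advertises SEV; EBX[5:0] is the C-bit position and
 * EBX[11:6] the number of physical address bits lost to encryption. */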
5946 *eax = sev_enabled() ? 0x2 : 0;
5947 *ebx = sev_get_cbit_position();
5948 *ebx |= sev_get_reduced_phys_bits() << 6;
5949 *ecx = 0;
5950 *edx = 0;
5951 break;
5952 default:
5953 /* reserved values: zero */
5954 *eax = 0;
5955 *ebx = 0;
5956 *ecx = 0;
5957 *edx = 0;
5958 break;
5959 }
5960 }
5961
5962 static void x86_cpu_reset(DeviceState *dev)
5963 {
5964 CPUState *s = CPU(dev);
5965 X86CPU *cpu = X86_CPU(s);
5966 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
5967 CPUX86State *env = &cpu->env;
5968 target_ulong cr4;
5969 uint64_t xcr0;
5970 int i;
5971
5972 xcc->parent_reset(dev);
5973
5974 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
5975
5976 env->old_exception = -1;
5977
5978 /* init to reset state */
5979
5980 env->hflags2 |= HF2_GIF_MASK;
5981
5982 cpu_x86_update_cr0(env, 0x60000010);
5983 env->a20_mask = ~0x0;
5984 env->smbase = 0x30000;
5985 env->msr_smi_count = 0;
5986
5987 env->idt.limit = 0xffff;
5988 env->gdt.limit = 0xffff;
5989 env->ldt.limit = 0xffff;
5990 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
5991 env->tr.limit = 0xffff;
5992 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
5993
5994 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
5995 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
5996 DESC_R_MASK | DESC_A_MASK);
5997 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
5998 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
5999 DESC_A_MASK);
6000 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
6001 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
6002 DESC_A_MASK);
6003 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
6004 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
6005 DESC_A_MASK);
6006 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
6007 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
6008 DESC_A_MASK);
6009 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
6010 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
6011 DESC_A_MASK);
6012
6013 env->eip = 0xfff0;
6014 env->regs[R_EDX] = env->cpuid_version;
6015
6016 env->eflags = 0x2;
6017
6018 /* FPU init */
6019 for (i = 0; i < 8; i++) {
6020 env->fptags[i] = 1;
6021 }
6022 cpu_set_fpuc(env, 0x37f);
6023
6024 env->mxcsr = 0x1f80;
6025 /* All units are in INIT state. */
6026 env->xstate_bv = 0;
6027
6028 env->pat = 0x0007040600070406ULL;
6029 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
6030 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
6031 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
6032 }
6033
6034 memset(env->dr, 0, sizeof(env->dr));
6035 env->dr[6] = DR6_FIXED_1;
6036 env->dr[7] = DR7_FIXED_1;
6037 cpu_breakpoint_remove_all(s, BP_CPU);
6038 cpu_watchpoint_remove_all(s, BP_CPU);
6039
6040 cr4 = 0;
6041 xcr0 = XSTATE_FP_MASK;
6042
6043 #ifdef CONFIG_USER_ONLY
6044 /* Enable all the features for user-mode. */
6045 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
6046 xcr0 |= XSTATE_SSE_MASK;
6047 }
6048 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
6049 const ExtSaveArea *esa = &x86_ext_save_areas[i];
6050 if (env->features[esa->feature] & esa->bits) {
6051 xcr0 |= 1ull << i;
6052 }
6053 }
6054
6055 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
6056 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
6057 }
6058 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
6059 cr4 |= CR4_FSGSBASE_MASK;
6060 }
6061 #endif
6062
6063 env->xcr0 = xcr0;
6064 cpu_x86_update_cr4(env, cr4);
6065
6066 /*
6067 * SDM 11.11.5 requires:
6068 * - IA32_MTRR_DEF_TYPE MSR.E = 0
6069 * - IA32_MTRR_PHYSMASKn.V = 0
6070 * All other bits are undefined. For simplification, zero it all.
6071 */
6072 env->mtrr_deftype = 0;
6073 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
6074 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
6075
6076 env->interrupt_injected = -1;
6077 env->exception_nr = -1;
6078 env->exception_pending = 0;
6079 env->exception_injected = 0;
6080 env->exception_has_payload = false;
6081 env->exception_payload = 0;
6082 env->nmi_injected = false;
6083 #if !defined(CONFIG_USER_ONLY)
6084 /* We hard-wire the BSP to the first CPU. */
6085 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
6086
6087 s->halted = !cpu_is_bsp(cpu);
6088
6089 if (kvm_enabled()) {
6090 kvm_arch_reset_vcpu(cpu);
6091 }
6092 else if (hvf_enabled()) {
6093 hvf_reset_vcpu(s);
6094 }
6095 #endif
6096 }
6097
6098 #ifndef CONFIG_USER_ONLY
6099 bool cpu_is_bsp(X86CPU *cpu)
6100 {
6101 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
6102 }
6103
6104 /* TODO: remove me when reset over QOM tree is implemented */
6105 static void x86_cpu_machine_reset_cb(void *opaque)
6106 {
6107 X86CPU *cpu = opaque;
6108 cpu_reset(CPU(cpu));
6109 }
6110 #endif
6111
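/*
 * Initialize the MCE/MCA state: banks are advertised only for family >= 6
 * CPUs that expose both CPUID.MCE and CPUID.MCA, with LMCE added when
 * cpu->enable_lmce is set.
 */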
6112 static void mce_init(X86CPU *cpu)
6113 {
6114 CPUX86State *cenv = &cpu->env;
6115 unsigned int bank;
6116
6117 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
6118 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
6119 (CPUID_MCE | CPUID_MCA)) {
6120 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
6121 (cpu->enable_lmce ? MCG_LMCE_P : 0);
6122 cenv->mcg_ctl = ~(uint64_t)0;
6123 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
6124 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
6125 }
6126 }
6127 }
6128
6129 #ifndef CONFIG_USER_ONLY
6130 APICCommonClass *apic_get_class(void)
6131 {
6132 const char *apic_type = "apic";
6133
6134 /* TODO: in-kernel irqchip for hvf */
6135 if (kvm_apic_in_kernel()) {
6136 apic_type = "kvm-apic";
6137 } else if (xen_enabled()) {
6138 apic_type = "xen-apic";
6139 }
6140
6141 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
6142 }
6143
6144 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
6145 {
6146 APICCommonState *apic;
6147 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
6148
6149 cpu->apic_state = DEVICE(object_new_with_class(apic_class));
6150
6151 object_property_add_child(OBJECT(cpu), "lapic",
6152 OBJECT(cpu->apic_state));
6153 object_unref(OBJECT(cpu->apic_state));
6154
6155 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
6156 /* TODO: convert to link<> */
6157 apic = APIC_COMMON(cpu->apic_state);
6158 apic->cpu = cpu;
6159 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
6160 }
6161
6162 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
6163 {
6164 APICCommonState *apic;
6165 static bool apic_mmio_map_once;
6166
6167 if (cpu->apic_state == NULL) {
6168 return;
6169 }
6170 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
6171 errp);
6172
6173 /* Map APIC MMIO area */
6174 apic = APIC_COMMON(cpu->apic_state);
6175 if (!apic_mmio_map_once) {
6176 memory_region_add_subregion_overlap(get_system_memory(),
6177 apic->apicbase &
6178 MSR_IA32_APICBASE_BASE,
6179 &apic->io_memory,
6180 0x1000);
6181 apic_mmio_map_once = true;
6182 }
6183 }
6184
6185 static void x86_cpu_machine_done(Notifier *n, void *unused)
6186 {
6187 X86CPU *cpu = container_of(n, X86CPU, machine_done);
6188 MemoryRegion *smram =
6189 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
6190
6191 if (smram) {
6192 cpu->smram = g_new(MemoryRegion, 1);
6193 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
6194 smram, 0, 1ull << 32);
6195 memory_region_set_enabled(cpu->smram, true);
6196 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
6197 }
6198 }
6199 #else
6200 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
6201 {
6202 }
6203 #endif
6204
6205 /* Note: Only safe for use on x86(-64) hosts */
6206 static uint32_t x86_host_phys_bits(void)
6207 {
6208 uint32_t eax;
6209 uint32_t host_phys_bits;
6210
6211 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
6212 if (eax >= 0x80000008) {
6213 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
6214 /* Note: According to AMD doc 25481 rev 2.34 they have a field
6215 * at 23:16 that can specify the maximum physical address bits for
6216 * the guest that can override this value; but I've not seen
6217 * anything with that set.
6218 */
6219 host_phys_bits = eax & 0xff;
6220 } else {
6221 /* It's an odd 64-bit machine that doesn't have the leaf for
6222 * physical address bits; fall back to 36, which most older
6223 * Intel CPUs use.
6224 */
6225 host_phys_bits = 36;
6226 }
6227
6228 return host_phys_bits;
6229 }
6230
6231 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
6232 {
6233 if (*min < value) {
6234 *min = value;
6235 }
6236 }
6237
6238 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
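/* For example, any bit set in FEAT_7_0_ECX raises cpuid_min_level to at
 * least 7, since that feature word lives in CPUID leaf 7. */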
6239 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
6240 {
6241 CPUX86State *env = &cpu->env;
6242 FeatureWordInfo *fi = &feature_word_info[w];
6243 uint32_t eax = fi->cpuid.eax;
6244 uint32_t region = eax & 0xF0000000;
6245
6246 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
6247 if (!env->features[w]) {
6248 return;
6249 }
6250
6251 switch (region) {
6252 case 0x00000000:
6253 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
6254 break;
6255 case 0x80000000:
6256 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
6257 break;
6258 case 0xC0000000:
6259 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
6260 break;
6261 }
6262
6263 if (eax == 7) {
6264 x86_cpu_adjust_level(cpu, &env->cpuid_min_level_func7,
6265 fi->cpuid.ecx);
6266 }
6267 }
6268
6269 /* Calculate XSAVE components based on the configured CPU feature flags */
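/*
 * For example, with the x87, SSE and AVX feature bits enabled, bits 0, 1
 * and 2 end up set in the FEAT_XSAVE_COMP_LO mask and are then reported
 * through CPUID[0xD].
 */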
6270 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
6271 {
6272 CPUX86State *env = &cpu->env;
6273 int i;
6274 uint64_t mask;
6275
6276 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
6277 return;
6278 }
6279
6280 mask = 0;
6281 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
6282 const ExtSaveArea *esa = &x86_ext_save_areas[i];
6283 if (env->features[esa->feature] & esa->bits) {
6284 mask |= (1ULL << i);
6285 }
6286 }
6287
6288 env->features[FEAT_XSAVE_COMP_LO] = mask;
6289 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
6290 }
6291
6292 /***** Steps involved in loading and filtering CPUID data
6293 *
6294 * When initializing and realizing a CPU object, the steps
6295 * involved in setting up CPUID data are:
6296 *
6297 * 1) Loading CPU model definition (X86CPUDefinition). This is
6298 * implemented by x86_cpu_load_model() and should be completely
6299 * transparent, as it is done automatically by instance_init.
6300 * No code should need to look at X86CPUDefinition structs
6301 * outside instance_init.
6302 *
6303 * 2) CPU expansion. This is done by realize before CPUID
6304 * filtering, and will make sure host/accelerator data is
6305 * loaded for CPU models that depend on host capabilities
6306 * (e.g. "host"). Done by x86_cpu_expand_features().
6307 *
6308 * 3) CPUID filtering. This initializes extra data related to
6309 * CPUID, and checks if the host supports all capabilities
6310 * required by the CPU. Runnability of a CPU model is
6311 * determined at this step. Done by x86_cpu_filter_features().
6312 *
6313 * Some operations don't require all steps to be performed.
6314 * More precisely:
6315 *
6316 * - CPU instance creation (instance_init) will run only CPU
6317 * model loading. CPU expansion can't run at instance_init-time
6318 * because host/accelerator data may not be available yet.
6319 * - CPU realization will perform both CPU model expansion and CPUID
6320 * filtering, and return an error in case one of them fails.
6321 * - query-cpu-definitions needs to run all 3 steps. It needs
6322 * to run CPUID filtering, as the 'unavailable-features'
6323 * field is set based on the filtering results.
6324 * - The query-cpu-model-expansion QMP command only needs to run
6325 * CPU model loading and CPU expansion. It should not filter
6326 * any CPUID data based on host capabilities.
6327 */
6328
6329 /* Expand CPU configuration data, based on configured features
6330 * and host/accelerator capabilities when appropriate.
6331 */
6332 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
6333 {
6334 CPUX86State *env = &cpu->env;
6335 FeatureWord w;
6336 int i;
6337 GList *l;
6338 Error *local_err = NULL;
6339
6340 for (l = plus_features; l; l = l->next) {
6341 const char *prop = l->data;
6342 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
6343 if (local_err) {
6344 goto out;
6345 }
6346 }
6347
6348 for (l = minus_features; l; l = l->next) {
6349 const char *prop = l->data;
6350 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
6351 if (local_err) {
6352 goto out;
6353 }
6354 }
6355
6356 /* TODO: Now that cpu->max_features doesn't overwrite features
6357 * set using QOM properties, we can convert
6358 * plus_features & minus_features to global properties
6359 * inside x86_cpu_parse_featurestr() too.
6360 */
6361 if (cpu->max_features) {
6362 for (w = 0; w < FEATURE_WORDS; w++) {
6363 /* Override only features that weren't set explicitly
6364 * by the user.
6365 */
6366 env->features[w] |=
6367 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
6368 ~env->user_features[w] &
6369 ~feature_word_info[w].no_autoenable_flags;
6370 }
6371 }
6372
6373 for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
6374 FeatureDep *d = &feature_dependencies[i];
6375 if (!(env->features[d->from.index] & d->from.mask)) {
6376 uint64_t unavailable_features = env->features[d->to.index] & d->to.mask;
6377
6378 /* Not an error unless the dependent feature was added explicitly. */
6379 mark_unavailable_features(cpu, d->to.index,
6380 unavailable_features & env->user_features[d->to.index],
6381 "This feature depends on other features that were not requested");
6382
6383 env->user_features[d->to.index] |= unavailable_features;
6384 env->features[d->to.index] &= ~unavailable_features;
6385 }
6386 }
6387
6388 if (!kvm_enabled() || !cpu->expose_kvm) {
6389 env->features[FEAT_KVM] = 0;
6390 }
6391
6392 x86_cpu_enable_xsave_components(cpu);
6393
6394 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
6395 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
6396 if (cpu->full_cpuid_auto_level) {
6397 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
6398 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
6399 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
6400 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
6401 x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX);
6402 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
6403 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
6404 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
6405 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
6406 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
6407 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
6408 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
6409
6410 /* Intel Processor Trace requires CPUID[0x14] */
6411 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT)) {
6412 if (cpu->intel_pt_auto_level) {
6413 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
6414 } else if (cpu->env.cpuid_min_level < 0x14) {
6415 mark_unavailable_features(cpu, FEAT_7_0_EBX,
6416 CPUID_7_0_EBX_INTEL_PT,
6417 "Intel PT need CPUID leaf 0x14, please set by \"-cpu ...,+intel-pt,level=0x14\"");
6418 }
6419 }
6420
6421 /* CPU topology with multiple dies requires CPUID[0x1F] */
6422 if (env->nr_dies > 1) {
6423 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
6424 }
6425
6426 /* SVM requires CPUID[0x8000000A] */
6427 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
6428 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
6429 }
6430
6431 /* SEV requires CPUID[0x8000001F] */
6432 if (sev_enabled()) {
6433 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
6434 }
6435 }
6436
6437 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
6438 if (env->cpuid_level_func7 == UINT32_MAX) {
6439 env->cpuid_level_func7 = env->cpuid_min_level_func7;
6440 }
6441 if (env->cpuid_level == UINT32_MAX) {
6442 env->cpuid_level = env->cpuid_min_level;
6443 }
6444 if (env->cpuid_xlevel == UINT32_MAX) {
6445 env->cpuid_xlevel = env->cpuid_min_xlevel;
6446 }
6447 if (env->cpuid_xlevel2 == UINT32_MAX) {
6448 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
6449 }
6450
6451 out:
6452 if (local_err != NULL) {
6453 error_propagate(errp, local_err);
6454 }
6455 }
6456
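/*
 * Worked example (illustrative only, assuming a KVM host that supports
 * AVX2): with "-cpu max,-avx2" the property setter for "avx2" records
 * the bit in env->user_features, so the cpu->max_features pass above
 * leaves the bit cleared even though the host reports it as supported.
 */
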
6457 /*
6458 * Finishes initialization of CPUID data and filters CPU feature
6459 * words based on host availability of each feature.
6460 *
6461 * Unavailable features are recorded via mark_unavailable_features().
6462 */
6463 static void x86_cpu_filter_features(X86CPU *cpu, bool verbose)
6464 {
6465 CPUX86State *env = &cpu->env;
6466 FeatureWord w;
6467 const char *prefix = NULL;
6468
6469 if (verbose) {
6470 prefix = accel_uses_host_cpuid()
6471 ? "host doesn't support requested feature"
6472 : "TCG doesn't support requested feature";
6473 }
6474
6475 for (w = 0; w < FEATURE_WORDS; w++) {
6476 uint64_t host_feat =
6477 x86_cpu_get_supported_feature_word(w, false);
6478 uint64_t requested_features = env->features[w];
6479 uint64_t unavailable_features = requested_features & ~host_feat;
6480 mark_unavailable_features(cpu, w, unavailable_features, prefix);
6481 }
6482
6483 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
6484 kvm_enabled()) {
6485 KVMState *s = CPU(cpu)->kvm_state;
6486 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
6487 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
6488 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
6489 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
6490 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
6491
6492 if (!eax_0 ||
6493 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
6494 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
6495 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
6496 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
6497 INTEL_PT_ADDR_RANGES_NUM) ||
6498 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
6499 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
6500 (ecx_0 & INTEL_PT_IP_LIP)) {
6501 /*
6502 * Processor Trace capabilities aren't configurable, so if the
6503 * host can't emulate the capabilities we report on
6504 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
6505 */
6506 mark_unavailable_features(cpu, FEAT_7_0_EBX, CPUID_7_0_EBX_INTEL_PT, prefix);
6507 }
6508 }
6509 }
6510
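/*
 * Illustrative only: the effect of filtering is visible on the command
 * line.  Assuming a host that lacks one of the model's features,
 *   -cpu Icelake-Server,enforce
 * refuses to start, while
 *   -cpu Icelake-Server,check
 * merely prints warnings, per the check_cpuid/enforce_cpuid handling
 * in x86_cpu_realizefn() below.
 */
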
6511 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
6512 {
6513 CPUState *cs = CPU(dev);
6514 X86CPU *cpu = X86_CPU(dev);
6515 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
6516 CPUX86State *env = &cpu->env;
6517 Error *local_err = NULL;
6518 static bool ht_warned;
6519
6520 if (xcc->host_cpuid_required) {
6521 if (!accel_uses_host_cpuid()) {
6522 g_autofree char *name = x86_cpu_class_get_model_name(xcc);
6523 error_setg(&local_err, "CPU model '%s' requires KVM", name);
6524 goto out;
6525 }
6526 }
6527
6528 if (cpu->max_features && accel_uses_host_cpuid()) {
6529 if (enable_cpu_pm) {
6530 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
6531 &cpu->mwait.ecx, &cpu->mwait.edx);
6532 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
6533 }
6534 if (kvm_enabled() && cpu->ucode_rev == 0) {
6535 cpu->ucode_rev = kvm_arch_get_supported_msr_feature(kvm_state,
6536 MSR_IA32_UCODE_REV);
6537 }
6538 }
6539
6540 if (cpu->ucode_rev == 0) {
6541 /* The default is the same as KVM's. */
6542 if (IS_AMD_CPU(env)) {
6543 cpu->ucode_rev = 0x01000065;
6544 } else {
6545 cpu->ucode_rev = 0x100000000ULL;
6546 }
6547 }
6548
6549 /* mwait extended info: needed for Core compatibility */
6550 /* We always wake on interrupt even if host does not have the capability */
6551 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
6552
6553 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
6554 error_setg(errp, "apic-id property was not initialized properly");
6555 return;
6556 }
6557
6558 x86_cpu_expand_features(cpu, &local_err);
6559 if (local_err) {
6560 goto out;
6561 }
6562
6563 x86_cpu_filter_features(cpu, cpu->check_cpuid || cpu->enforce_cpuid);
6564
6565 if (cpu->enforce_cpuid && x86_cpu_have_filtered_features(cpu)) {
6566 error_setg(&local_err,
6567 accel_uses_host_cpuid() ?
6568 "Host doesn't support requested features" :
6569 "TCG doesn't support requested features");
6570 goto out;
6571 }
6572
6573 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
6574 * CPUID[1].EDX.
6575 */
6576 if (IS_AMD_CPU(env)) {
6577 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
6578 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
6579 & CPUID_EXT2_AMD_ALIASES);
6580 }
6581
6582 /* For 64-bit systems, decide how many physical address bits to present.
6583 * Ideally this should be the same as the host; anything other than matching
6584 * the host can cause incorrect guest behaviour.
6585 * QEMU used to pick the magic value of 40 bits, which corresponds to
6586 * consumer AMD devices but nothing else.
6587 */
6588 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
6589 if (accel_uses_host_cpuid()) {
6590 uint32_t host_phys_bits = x86_host_phys_bits();
6591 static bool warned;
6592
6593 /* Print a warning if the user set it to a value that's not the
6594 * host value.
6595 */
6596 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
6597 !warned) {
6598 warn_report("Host physical bits (%u)"
6599 " does not match phys-bits property (%u)",
6600 host_phys_bits, cpu->phys_bits);
6601 warned = true;
6602 }
6603
6604 if (cpu->host_phys_bits) {
6605 /* The user asked for us to use the host physical bits */
6606 cpu->phys_bits = host_phys_bits;
6607 if (cpu->host_phys_bits_limit &&
6608 cpu->phys_bits > cpu->host_phys_bits_limit) {
6609 cpu->phys_bits = cpu->host_phys_bits_limit;
6610 }
6611 }
6612
6613 if (cpu->phys_bits &&
6614 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
6615 cpu->phys_bits < 32)) {
6616 error_setg(errp, "phys-bits should be between 32 and %u"
6617 " (but is %u)",
6618 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
6619 return;
6620 }
6621 } else {
6622 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
6623 error_setg(errp, "TCG only supports phys-bits=%u",
6624 TCG_PHYS_ADDR_BITS);
6625 return;
6626 }
6627 }
6628 /* 0 means it was not explicitly set by the user (or by machine
6629 * compat_props or by the host code above). In this case, the default
6630 * is the value used by TCG (40).
6631 */
6632 if (cpu->phys_bits == 0) {
6633 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
6634 }
6635 } else {
6636 /* For 32-bit systems, don't use the user-set value, but keep
6637 * phys_bits consistent with what we tell the guest.
6638 */
6639 if (cpu->phys_bits != 0) {
6640 error_setg(errp, "phys-bits is not user-configurable in 32-bit mode");
6641 return;
6642 }
6643
6644 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
6645 cpu->phys_bits = 36;
6646 } else {
6647 cpu->phys_bits = 32;
6648 }
6649 }
6650
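/*
 * Usage sketch (illustrative): on a KVM host,
 *   -cpu host,host-phys-bits=on
 * makes the guest see the host's physical address width, while e.g.
 *   -cpu host,phys-bits=40
 * pins the width explicitly and triggers the mismatch warning above
 * whenever the host width differs from 40.
 */
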
6651 /* Cache information initialization */
6652 if (!cpu->legacy_cache) {
6653 if (!xcc->model || !xcc->model->cpudef->cache_info) {
6654 g_autofree char *name = x86_cpu_class_get_model_name(xcc);
6655 error_setg(errp,
6656 "CPU model '%s' doesn't support legacy-cache=off", name);
6657 return;
6658 }
6659 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
6660 *xcc->model->cpudef->cache_info;
6661 } else {
6662 /* Build legacy cache information */
6663 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
6664 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
6665 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
6666 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
6667
6668 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
6669 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
6670 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
6671 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
6672
6673 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
6674 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
6675 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
6676 env->cache_info_amd.l3_cache = &legacy_l3_cache;
6677 }
6678
6679
6680 cpu_exec_realizefn(cs, &local_err);
6681 if (local_err != NULL) {
6682 error_propagate(errp, local_err);
6683 return;
6684 }
6685
6686 #ifndef CONFIG_USER_ONLY
6687 MachineState *ms = MACHINE(qdev_get_machine());
6688 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
6689
6690 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
6691 x86_cpu_apic_create(cpu, &local_err);
6692 if (local_err != NULL) {
6693 goto out;
6694 }
6695 }
6696 #endif
6697
6698 mce_init(cpu);
6699
6700 #ifndef CONFIG_USER_ONLY
6701 if (tcg_enabled()) {
6702 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
6703 cpu->cpu_as_root = g_new(MemoryRegion, 1);
6704
6705 /* Outer container... */
6706 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
6707 memory_region_set_enabled(cpu->cpu_as_root, true);
6708
6709 /* ... with two regions inside: normal system memory with low
6710 * priority, and...
6711 */
6712 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
6713 get_system_memory(), 0, ~0ull);
6714 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
6715 memory_region_set_enabled(cpu->cpu_as_mem, true);
6716
6717 cs->num_ases = 2;
6718 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
6719 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
6720
6721 /* ... SMRAM with higher priority, linked from /machine/smram. */
6722 cpu->machine_done.notify = x86_cpu_machine_done;
6723 qemu_add_machine_init_done_notifier(&cpu->machine_done);
6724 }
6725 #endif
6726
6727 qemu_init_vcpu(cs);
6728
6729 /*
6730 * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
6731 * works around unsupported configurations by adjusting CPUID_0000_0001_EBX
6732 * and CPUID_8000_0008_ECX based on the inputs (sockets, cores, threads), it
6733 * is still better to give users a warning.
6734 *
6735 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
6736 * cs->nr_threads hasn't been populated yet and the check is incorrect.
6737 */
6738 if (IS_AMD_CPU(env) &&
6739 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
6740 cs->nr_threads > 1 && !ht_warned) {
6741 warn_report("This family of AMD CPU doesn't support "
6742 "hyperthreading(%d)",
6743 cs->nr_threads);
6744 error_printf("Please configure -smp options properly"
6745 " or try enabling topoext feature.\n");
6746 ht_warned = true;
6747 }
6748
6749 x86_cpu_apic_realize(cpu, &local_err);
6750 if (local_err != NULL) {
6751 goto out;
6752 }
6753 cpu_reset(cs);
6754
6755 xcc->parent_realize(dev, &local_err);
6756
6757 out:
6758 if (local_err != NULL) {
6759 error_propagate(errp, local_err);
6760 return;
6761 }
6762 }
6763
6764 static void x86_cpu_unrealizefn(DeviceState *dev)
6765 {
6766 X86CPU *cpu = X86_CPU(dev);
6767 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
6768
6769 #ifndef CONFIG_USER_ONLY
6770 cpu_remove_sync(CPU(dev));
6771 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
6772 #endif
6773
6774 if (cpu->apic_state) {
6775 object_unparent(OBJECT(cpu->apic_state));
6776 cpu->apic_state = NULL;
6777 }
6778
6779 xcc->parent_unrealize(dev);
6780 }
6781
6782 typedef struct BitProperty {
6783 FeatureWord w;
6784 uint64_t mask;
6785 } BitProperty;
6786
6787 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
6788 void *opaque, Error **errp)
6789 {
6790 X86CPU *cpu = X86_CPU(obj);
6791 BitProperty *fp = opaque;
6792 uint64_t f = cpu->env.features[fp->w];
6793 bool value = (f & fp->mask) == fp->mask;
6794 visit_type_bool(v, name, &value, errp);
6795 }
6796
6797 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
6798 void *opaque, Error **errp)
6799 {
6800 DeviceState *dev = DEVICE(obj);
6801 X86CPU *cpu = X86_CPU(obj);
6802 BitProperty *fp = opaque;
6803 Error *local_err = NULL;
6804 bool value;
6805
6806 if (dev->realized) {
6807 qdev_prop_set_after_realize(dev, name, errp);
6808 return;
6809 }
6810
6811 visit_type_bool(v, name, &value, &local_err);
6812 if (local_err) {
6813 error_propagate(errp, local_err);
6814 return;
6815 }
6816
6817 if (value) {
6818 cpu->env.features[fp->w] |= fp->mask;
6819 } else {
6820 cpu->env.features[fp->w] &= ~fp->mask;
6821 }
6822 cpu->env.user_features[fp->w] |= fp->mask;
6823 }
6824
6825 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
6826 void *opaque)
6827 {
6828 BitProperty *prop = opaque;
6829 g_free(prop);
6830 }
6831
6832 /* Register a boolean property to get/set a single bit in a uint64_t field.
6833 *
6834 * The same property name can be registered multiple times to make it affect
6835 * multiple bits in the same FeatureWord. In that case, the getter will return
6836 * true only if all bits are set.
6837 */
6838 static void x86_cpu_register_bit_prop(X86CPU *cpu,
6839 const char *prop_name,
6840 FeatureWord w,
6841 int bitnr)
6842 {
6843 BitProperty *fp;
6844 ObjectProperty *op;
6845 uint64_t mask = (1ULL << bitnr);
6846
6847 op = object_property_find(OBJECT(cpu), prop_name, NULL);
6848 if (op) {
6849 fp = op->opaque;
6850 assert(fp->w == w);
6851 fp->mask |= mask;
6852 } else {
6853 fp = g_new0(BitProperty, 1);
6854 fp->w = w;
6855 fp->mask = mask;
6856 object_property_add(OBJECT(cpu), prop_name, "bool",
6857 x86_cpu_get_bit_prop,
6858 x86_cpu_set_bit_prop,
6859 x86_cpu_release_bit_prop, fp);
6860 }
6861 }
6862
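/*
 * Illustrative only: every named feature bit registered here becomes a
 * boolean QOM property, so the legacy "+/-" syntax and the explicit
 * "=on/off" syntax toggle the same bit, e.g. these are equivalent:
 *   -cpu Skylake-Client,-avx2
 *   -cpu Skylake-Client,avx2=off
 */
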
6863 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
6864 FeatureWord w,
6865 int bitnr)
6866 {
6867 FeatureWordInfo *fi = &feature_word_info[w];
6868 const char *name = fi->feat_names[bitnr];
6869
6870 if (!name) {
6871 return;
6872 }
6873
6874 /* Property names should use "-" instead of "_".
6875 * Old names containing underscores are registered as aliases
6876 * using object_property_add_alias()
6877 */
6878 assert(!strchr(name, '_'));
6879 /* aliases don't use "|" delimiters anymore, they are registered
6880 * manually using object_property_add_alias() */
6881 assert(!strchr(name, '|'));
6882 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
6883 }
6884
6885 #if !defined(CONFIG_USER_ONLY)
6886 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
6887 {
6888 X86CPU *cpu = X86_CPU(cs);
6889 CPUX86State *env = &cpu->env;
6890 GuestPanicInformation *panic_info = NULL;
6891
6892 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
6893 panic_info = g_malloc0(sizeof(GuestPanicInformation));
6894
6895 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
6896
6897 assert(HV_CRASH_PARAMS >= 5);
6898 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
6899 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
6900 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
6901 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
6902 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
6903 }
6904
6905 return panic_info;
6906 }
6907 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
6908 const char *name, void *opaque,
6909 Error **errp)
6910 {
6911 CPUState *cs = CPU(obj);
6912 GuestPanicInformation *panic_info;
6913
6914 if (!cs->crash_occurred) {
6915 error_setg(errp, "No crash occured");
6916 return;
6917 }
6918
6919 panic_info = x86_cpu_get_crash_info(cs);
6920 if (panic_info == NULL) {
6921 error_setg(errp, "No crash information");
6922 return;
6923 }
6924
6925 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
6926 errp);
6927 qapi_free_GuestPanicInformation(panic_info);
6928 }
6929 #endif /* !CONFIG_USER_ONLY */
6930
6931 static void x86_cpu_initfn(Object *obj)
6932 {
6933 X86CPU *cpu = X86_CPU(obj);
6934 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
6935 CPUX86State *env = &cpu->env;
6936 FeatureWord w;
6937
6938 env->nr_dies = 1;
6939 env->nr_nodes = 1;
6940 cpu_set_cpustate_pointers(cpu);
6941
6942 object_property_add(obj, "family", "int",
6943 x86_cpuid_version_get_family,
6944 x86_cpuid_version_set_family, NULL, NULL);
6945 object_property_add(obj, "model", "int",
6946 x86_cpuid_version_get_model,
6947 x86_cpuid_version_set_model, NULL, NULL);
6948 object_property_add(obj, "stepping", "int",
6949 x86_cpuid_version_get_stepping,
6950 x86_cpuid_version_set_stepping, NULL, NULL);
6951 object_property_add_str(obj, "vendor",
6952 x86_cpuid_get_vendor,
6953 x86_cpuid_set_vendor);
6954 object_property_add_str(obj, "model-id",
6955 x86_cpuid_get_model_id,
6956 x86_cpuid_set_model_id);
6957 object_property_add(obj, "tsc-frequency", "int",
6958 x86_cpuid_get_tsc_freq,
6959 x86_cpuid_set_tsc_freq, NULL, NULL);
6960 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
6961 x86_cpu_get_feature_words,
6962 NULL, NULL, (void *)env->features);
6963 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
6964 x86_cpu_get_feature_words,
6965 NULL, NULL, (void *)cpu->filtered_features);
6966 /*
6967 * The "unavailable-features" property has the same semantics as
6968 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
6969 * QMP command: both list the features that would have prevented the
6970 * CPU from running if the "enforce" flag was set.
6971 */
6972 object_property_add(obj, "unavailable-features", "strList",
6973 x86_cpu_get_unavailable_features,
6974 NULL, NULL, NULL);
6975
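/*
 * Example (illustrative; the QOM path shown is only a typical location
 * for the first CPU and varies by machine type): the property can be
 * read at runtime with
 *   { "execute": "qom-get",
 *     "arguments": { "path": "/machine/unattached/device[0]",
 *                    "property": "unavailable-features" } }
 */
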
6976 #if !defined(CONFIG_USER_ONLY)
6977 object_property_add(obj, "crash-information", "GuestPanicInformation",
6978 x86_cpu_get_crash_info_qom, NULL, NULL, NULL);
6979 #endif
6980
6981 for (w = 0; w < FEATURE_WORDS; w++) {
6982 int bitnr;
6983
6984 for (bitnr = 0; bitnr < 64; bitnr++) {
6985 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
6986 }
6987 }
6988
6989 object_property_add_alias(obj, "sse3", obj, "pni");
6990 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq");
6991 object_property_add_alias(obj, "sse4-1", obj, "sse4.1");
6992 object_property_add_alias(obj, "sse4-2", obj, "sse4.2");
6993 object_property_add_alias(obj, "xd", obj, "nx");
6994 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt");
6995 object_property_add_alias(obj, "i64", obj, "lm");
6996
6997 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl");
6998 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust");
6999 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt");
7000 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm");
7001 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy");
7002 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr");
7003 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core");
7004 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb");
7005 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay");
7006 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu");
7007 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf");
7008 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time");
7009 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi");
7010 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt");
7011 object_property_add_alias(obj, "kvm_poll_control", obj, "kvm-poll-control");
7012 object_property_add_alias(obj, "svm_lock", obj, "svm-lock");
7013 object_property_add_alias(obj, "nrip_save", obj, "nrip-save");
7014 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale");
7015 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean");
7016 object_property_add_alias(obj, "pause_filter", obj, "pause-filter");
7017 object_property_add_alias(obj, "sse4_1", obj, "sse4.1");
7018 object_property_add_alias(obj, "sse4_2", obj, "sse4.2");
7019
7020 if (xcc->model) {
7021 x86_cpu_load_model(cpu, xcc->model);
7022 }
7023 }
7024
7025 static int64_t x86_cpu_get_arch_id(CPUState *cs)
7026 {
7027 X86CPU *cpu = X86_CPU(cs);
7028
7029 return cpu->apic_id;
7030 }
7031
7032 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
7033 {
7034 X86CPU *cpu = X86_CPU(cs);
7035
7036 return cpu->env.cr[0] & CR0_PG_MASK;
7037 }
7038
7039 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
7040 {
7041 X86CPU *cpu = X86_CPU(cs);
7042
7043 cpu->env.eip = value;
7044 }
7045
7046 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
7047 {
7048 X86CPU *cpu = X86_CPU(cs);
7049
7050 cpu->env.eip = tb->pc - tb->cs_base;
7051 }
7052
7053 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
7054 {
7055 X86CPU *cpu = X86_CPU(cs);
7056 CPUX86State *env = &cpu->env;
7057
7058 #if !defined(CONFIG_USER_ONLY)
7059 if (interrupt_request & CPU_INTERRUPT_POLL) {
7060 return CPU_INTERRUPT_POLL;
7061 }
7062 #endif
7063 if (interrupt_request & CPU_INTERRUPT_SIPI) {
7064 return CPU_INTERRUPT_SIPI;
7065 }
7066
7067 if (env->hflags2 & HF2_GIF_MASK) {
7068 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
7069 !(env->hflags & HF_SMM_MASK)) {
7070 return CPU_INTERRUPT_SMI;
7071 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
7072 !(env->hflags2 & HF2_NMI_MASK)) {
7073 return CPU_INTERRUPT_NMI;
7074 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
7075 return CPU_INTERRUPT_MCE;
7076 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
7077 (((env->hflags2 & HF2_VINTR_MASK) &&
7078 (env->hflags2 & HF2_HIF_MASK)) ||
7079 (!(env->hflags2 & HF2_VINTR_MASK) &&
7080 (env->eflags & IF_MASK &&
7081 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
7082 return CPU_INTERRUPT_HARD;
7083 #if !defined(CONFIG_USER_ONLY)
7084 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
7085 (env->eflags & IF_MASK) &&
7086 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
7087 return CPU_INTERRUPT_VIRQ;
7088 #endif
7089 }
7090 }
7091
7092 return 0;
7093 }
7094
7095 static bool x86_cpu_has_work(CPUState *cs)
7096 {
7097 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
7098 }
7099
7100 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
7101 {
7102 X86CPU *cpu = X86_CPU(cs);
7103 CPUX86State *env = &cpu->env;
7104
7105 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
7106 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
7107 : bfd_mach_i386_i8086);
7108 info->print_insn = print_insn_i386;
7109
7110 info->cap_arch = CS_ARCH_X86;
7111 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
7112 : env->hflags & HF_CS32_MASK ? CS_MODE_32
7113 : CS_MODE_16);
7114 info->cap_insn_unit = 1;
7115 info->cap_insn_split = 8;
7116 }
7117
7118 void x86_update_hflags(CPUX86State *env)
7119 {
7120 uint32_t hflags;
7121 #define HFLAG_COPY_MASK \
7122 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
7123 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
7124 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
7125 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
7126
7127 hflags = env->hflags & HFLAG_COPY_MASK;
7128 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
7129 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
7130 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
7131 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
7132 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
7133
7134 if (env->cr[4] & CR4_OSFXSR_MASK) {
7135 hflags |= HF_OSFXSR_MASK;
7136 }
7137
7138 if (env->efer & MSR_EFER_LMA) {
7139 hflags |= HF_LMA_MASK;
7140 }
7141
7142 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
7143 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
7144 } else {
7145 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
7146 (DESC_B_SHIFT - HF_CS32_SHIFT);
7147 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
7148 (DESC_B_SHIFT - HF_SS32_SHIFT);
7149 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
7150 !(hflags & HF_CS32_MASK)) {
7151 hflags |= HF_ADDSEG_MASK;
7152 } else {
7153 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
7154 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
7155 }
7156 }
7157 env->hflags = hflags;
7158 }
7159
7160 static Property x86_cpu_properties[] = {
7161 #ifdef CONFIG_USER_ONLY
7162 /* apic_id = 0 by default for *-user, see commit 9886e834 */
7163 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
7164 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
7165 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
7166 DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
7167 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
7168 #else
7169 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
7170 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
7171 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
7172 DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
7173 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
7174 #endif
7175 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
7176 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
7177
7178 DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
7179 HYPERV_SPINLOCK_NEVER_RETRY),
7180 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
7181 HYPERV_FEAT_RELAXED, 0),
7182 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
7183 HYPERV_FEAT_VAPIC, 0),
7184 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
7185 HYPERV_FEAT_TIME, 0),
7186 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
7187 HYPERV_FEAT_CRASH, 0),
7188 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
7189 HYPERV_FEAT_RESET, 0),
7190 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
7191 HYPERV_FEAT_VPINDEX, 0),
7192 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
7193 HYPERV_FEAT_RUNTIME, 0),
7194 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
7195 HYPERV_FEAT_SYNIC, 0),
7196 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
7197 HYPERV_FEAT_STIMER, 0),
7198 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
7199 HYPERV_FEAT_FREQUENCIES, 0),
7200 DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
7201 HYPERV_FEAT_REENLIGHTENMENT, 0),
7202 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
7203 HYPERV_FEAT_TLBFLUSH, 0),
7204 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
7205 HYPERV_FEAT_EVMCS, 0),
7206 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
7207 HYPERV_FEAT_IPI, 0),
7208 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
7209 HYPERV_FEAT_STIMER_DIRECT, 0),
7210 DEFINE_PROP_ON_OFF_AUTO("hv-no-nonarch-coresharing", X86CPU,
7211 hyperv_no_nonarch_cs, ON_OFF_AUTO_OFF),
7212 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
7213
7214 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
7215 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
7216 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
7217 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
7218 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
7219 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
7220 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
7221 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
7222 DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7,
7223 UINT32_MAX),
7224 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
7225 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
7226 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
7227 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
7228 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
7229 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
7230 DEFINE_PROP_UINT64("ucode-rev", X86CPU, ucode_rev, 0),
7231 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
7232 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
7233 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
7234 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
7235 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
7236 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
7237 false),
7238 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
7239 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
7240 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
7241 true),
7242 /*
7243 * legacy_cache defaults to true unless the CPU model provides its
7244 * own cache information (see x86_cpu_load_model()).
7245 */
7246 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
7247
7248 /*
7249 * From "Requirements for Implementing the Microsoft
7250 * Hypervisor Interface":
7251 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
7252 *
7253 * "Starting with Windows Server 2012 and Windows 8, if
7254 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
7255 * the hypervisor imposes no specific limit to the number of VPs.
7256 * In this case, Windows Server 2012 guest VMs may use more than
7257 * 64 VPs, up to the maximum supported number of processors applicable
7258 * to the specific Windows version being used."
7259 */
7260 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
7261 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
7262 false),
7263 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
7264 true),
7265 DEFINE_PROP_END_OF_LIST()
7266 };
7267
7268 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
7269 {
7270 X86CPUClass *xcc = X86_CPU_CLASS(oc);
7271 CPUClass *cc = CPU_CLASS(oc);
7272 DeviceClass *dc = DEVICE_CLASS(oc);
7273
7274 device_class_set_parent_realize(dc, x86_cpu_realizefn,
7275 &xcc->parent_realize);
7276 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
7277 &xcc->parent_unrealize);
7278 device_class_set_props(dc, x86_cpu_properties);
7279
7280 device_class_set_parent_reset(dc, x86_cpu_reset, &xcc->parent_reset);
7281 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
7282
7283 cc->class_by_name = x86_cpu_class_by_name;
7284 cc->parse_features = x86_cpu_parse_featurestr;
7285 cc->has_work = x86_cpu_has_work;
7286 #ifdef CONFIG_TCG
7287 cc->do_interrupt = x86_cpu_do_interrupt;
7288 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
7289 #endif
7290 cc->dump_state = x86_cpu_dump_state;
7291 cc->set_pc = x86_cpu_set_pc;
7292 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
7293 cc->gdb_read_register = x86_cpu_gdb_read_register;
7294 cc->gdb_write_register = x86_cpu_gdb_write_register;
7295 cc->get_arch_id = x86_cpu_get_arch_id;
7296 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
7297 #ifndef CONFIG_USER_ONLY
7298 cc->asidx_from_attrs = x86_asidx_from_attrs;
7299 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
7300 cc->get_phys_page_attrs_debug = x86_cpu_get_phys_page_attrs_debug;
7301 cc->get_crash_info = x86_cpu_get_crash_info;
7302 cc->write_elf64_note = x86_cpu_write_elf64_note;
7303 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
7304 cc->write_elf32_note = x86_cpu_write_elf32_note;
7305 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
7306 cc->vmsd = &vmstate_x86_cpu;
7307 #endif
7308 cc->gdb_arch_name = x86_gdb_arch_name;
7309 #ifdef TARGET_X86_64
7310 cc->gdb_core_xml_file = "i386-64bit.xml";
7311 cc->gdb_num_core_regs = 66;
7312 #else
7313 cc->gdb_core_xml_file = "i386-32bit.xml";
7314 cc->gdb_num_core_regs = 50;
7315 #endif
7316 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
7317 cc->debug_excp_handler = breakpoint_handler;
7318 #endif
7319 cc->cpu_exec_enter = x86_cpu_exec_enter;
7320 cc->cpu_exec_exit = x86_cpu_exec_exit;
7321 #ifdef CONFIG_TCG
7322 cc->tcg_initialize = tcg_x86_init;
7323 cc->tlb_fill = x86_cpu_tlb_fill;
7324 #endif
7325 cc->disas_set_info = x86_disas_set_info;
7326
7327 dc->user_creatable = true;
7328 }
7329
7330 static const TypeInfo x86_cpu_type_info = {
7331 .name = TYPE_X86_CPU,
7332 .parent = TYPE_CPU,
7333 .instance_size = sizeof(X86CPU),
7334 .instance_init = x86_cpu_initfn,
7335 .abstract = true,
7336 .class_size = sizeof(X86CPUClass),
7337 .class_init = x86_cpu_common_class_init,
7338 };
7339
7340
7341 /* "base" CPU model, used by query-cpu-model-expansion */
7342 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
7343 {
7344 X86CPUClass *xcc = X86_CPU_CLASS(oc);
7345
7346 xcc->static_model = true;
7347 xcc->migration_safe = true;
7348 xcc->model_description = "base CPU model type with no features enabled";
7349 xcc->ordering = 8;
7350 }
7351
7352 static const TypeInfo x86_base_cpu_type_info = {
7353 .name = X86_CPU_TYPE_NAME("base"),
7354 .parent = TYPE_X86_CPU,
7355 .class_init = x86_cpu_base_class_init,
7356 };
7357
7358 static void x86_cpu_register_types(void)
7359 {
7360 int i;
7361
7362 type_register_static(&x86_cpu_type_info);
7363 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
7364 x86_register_cpudef_types(&builtin_x86_defs[i]);
7365 }
7366 type_register_static(&max_x86_cpu_type_info);
7367 type_register_static(&x86_base_cpu_type_info);
7368 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
7369 type_register_static(&host_x86_cpu_type_info);
7370 #endif
7371 }
7372
7373 type_init(x86_cpu_register_types)