]> git.proxmox.com Git - mirror_qemu.git/blob - target/i386/cpu.c
f41917649d154e091f6e7ded53f6121ad95223e7
[mirror_qemu.git] / target / i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24 #include "qemu/qemu-print.h"
25
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/hvf.h"
30 #include "sysemu/cpus.h"
31 #include "kvm_i386.h"
32 #include "sev_i386.h"
33
34 #include "qemu/error-report.h"
35 #include "qemu/module.h"
36 #include "qemu/option.h"
37 #include "qemu/config-file.h"
38 #include "qapi/error.h"
39 #include "qapi/qapi-visit-machine.h"
40 #include "qapi/qapi-visit-run-state.h"
41 #include "qapi/qmp/qdict.h"
42 #include "qapi/qmp/qerror.h"
43 #include "qapi/visitor.h"
44 #include "qom/qom-qobject.h"
45 #include "sysemu/arch_init.h"
46 #include "qapi/qapi-commands-machine-target.h"
47
48 #include "standard-headers/asm-x86/kvm_para.h"
49
50 #include "sysemu/sysemu.h"
51 #include "sysemu/tcg.h"
52 #include "hw/qdev-properties.h"
53 #include "hw/i386/topology.h"
54 #ifndef CONFIG_USER_ONLY
55 #include "exec/address-spaces.h"
56 #include "hw/hw.h"
57 #include "hw/xen/xen.h"
58 #include "hw/i386/apic_internal.h"
59 #include "hw/boards.h"
60 #endif
61
62 #include "disas/capstone.h"
63
64 /* Helpers for building CPUID[2] descriptors: */
65
/*
 * Properties described by one CPUID leaf 2 descriptor byte.
 * The descriptor byte itself is the index into cpuid2_cache_descriptors[].
 */
struct CPUID2CacheDescriptorInfo {
    enum CacheType type;  /* data / instruction / unified */
    int level;            /* cache level (1, 2 or 3) */
    int size;             /* total cache size, in bytes */
    int line_size;        /* cache line size, in bytes */
    int associativity;    /* number of ways */
};
73
/*
 * Known CPUID 2 cache descriptors.
 * From Intel SDM Volume 2A, CPUID instruction
 *
 * The array index is the descriptor byte value; cpuid2_cache_descriptor()
 * returns the index of the entry matching a given cache.
 */
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
    [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x0A] = { .level = 1, .type = DATA_CACHE,        .size = 8 * KiB,
               .associativity = 2,  .line_size = 32, },
    [0x0C] = { .level = 1, .type = DATA_CACHE,        .size = 16 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x0D] = { .level = 1, .type = DATA_CACHE,        .size = 16 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x0E] = { .level = 1, .type = DATA_CACHE,        .size = 24 * KiB,
               .associativity = 6,  .line_size = 64, },
    [0x1D] = { .level = 2, .type = UNIFIED_CACHE,     .size = 128 * KiB,
               .associativity = 2,  .line_size = 64, },
    [0x21] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 8,  .line_size = 64, },
    /*
     * lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x22, 0x23 are not included
     */
    [0x24] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 16, .line_size = 64, },
    /*
     * lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x25, 0x20 are not included
     */
    [0x2C] = { .level = 1, .type = DATA_CACHE,        .size = 32 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x41] = { .level = 2, .type = UNIFIED_CACHE,     .size = 128 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x42] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x43] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x44] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 4,  .line_size = 32, },
    [0x45] = { .level = 2, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 4,  .line_size = 32, },
    [0x46] = { .level = 3, .type = UNIFIED_CACHE,     .size = 4 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0x47] = { .level = 3, .type = UNIFIED_CACHE,     .size = 8 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0x48] = { .level = 2, .type = UNIFIED_CACHE,     .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    /* Descriptor 0x49 depends on CPU family/model, so it is not included */
    [0x4A] = { .level = 3, .type = UNIFIED_CACHE,     .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4B] = { .level = 3, .type = UNIFIED_CACHE,     .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4C] = { .level = 3, .type = UNIFIED_CACHE,     .size = 12 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4D] = { .level = 3, .type = UNIFIED_CACHE,     .size = 16 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4E] = { .level = 2, .type = UNIFIED_CACHE,     .size = 6 * MiB,
               .associativity = 24, .line_size = 64, },
    [0x60] = { .level = 1, .type = DATA_CACHE,        .size = 16 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x66] = { .level = 1, .type = DATA_CACHE,        .size = 8 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x67] = { .level = 1, .type = DATA_CACHE,        .size = 16 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x68] = { .level = 1, .type = DATA_CACHE,        .size = 32 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x78] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 4,  .line_size = 64, },
    /*
     * lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
     */
    [0x7D] = { .level = 2, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0x7F] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 2,  .line_size = 64, },
    [0x80] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x82] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 8,  .line_size = 32, },
    [0x83] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 8,  .line_size = 32, },
    [0x84] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 8,  .line_size = 32, },
    [0x85] = { .level = 2, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 8,  .line_size = 32, },
    [0x86] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x87] = { .level = 2, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD0] = { .level = 3, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0xD1] = { .level = 3, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0xD2] = { .level = 3, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0xD6] = { .level = 3, .type = UNIFIED_CACHE,     .size = 1 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD7] = { .level = 3, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD8] = { .level = 3, .type = UNIFIED_CACHE,     .size = 4 * MiB,
               .associativity = 8,  .line_size = 64, },
    /* 1.5 * MiB evaluates exactly: 0xDC is a 1536 KiB cache */
    [0xDC] = { .level = 3, .type = UNIFIED_CACHE,     .size = 1.5 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDD] = { .level = 3, .type = UNIFIED_CACHE,     .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDE] = { .level = 3, .type = UNIFIED_CACHE,     .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xE2] = { .level = 3, .type = UNIFIED_CACHE,     .size = 2 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE3] = { .level = 3, .type = UNIFIED_CACHE,     .size = 4 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE4] = { .level = 3, .type = UNIFIED_CACHE,     .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xEA] = { .level = 3, .type = UNIFIED_CACHE,     .size = 12 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEB] = { .level = 3, .type = UNIFIED_CACHE,     .size = 18 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEC] = { .level = 3, .type = UNIFIED_CACHE,     .size = 24 * MiB,
               .associativity = 24, .line_size = 64, },
};
198
199 /*
200 * "CPUID leaf 2 does not report cache descriptor information,
201 * use CPUID leaf 4 to query cache parameters"
202 */
203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
204
205 /*
206 * Return a CPUID 2 cache descriptor for a given cache.
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
208 */
209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
210 {
211 int i;
212
213 assert(cache->size > 0);
214 assert(cache->level > 0);
215 assert(cache->line_size > 0);
216 assert(cache->associativity > 0);
217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
219 if (d->level == cache->level && d->type == cache->type &&
220 d->size == cache->size && d->line_size == cache->line_size &&
221 d->associativity == cache->associativity) {
222 return i;
223 }
224 }
225
226 return CACHE_DESCRIPTOR_UNAVAILABLE;
227 }
228
229 /* CPUID Leaf 4 constants: */
230
231 /* EAX: */
232 #define CACHE_TYPE_D 1
233 #define CACHE_TYPE_I 2
234 #define CACHE_TYPE_UNIFIED 3
235
236 #define CACHE_LEVEL(l) (l << 5)
237
238 #define CACHE_SELF_INIT_LEVEL (1 << 8)
239
240 /* EDX: */
241 #define CACHE_NO_INVD_SHARING (1 << 0)
242 #define CACHE_INCLUSIVE (1 << 1)
243 #define CACHE_COMPLEX_IDX (1 << 2)
244
245 /* Encode CacheType for CPUID[4].EAX */
246 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
247 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
248 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
249 0 /* Invalid value */)
250
251
252 /* Encode cache info for CPUID[4] */
253 static void encode_cache_cpuid4(CPUCacheInfo *cache,
254 int num_apic_ids, int num_cores,
255 uint32_t *eax, uint32_t *ebx,
256 uint32_t *ecx, uint32_t *edx)
257 {
258 assert(cache->size == cache->line_size * cache->associativity *
259 cache->partitions * cache->sets);
260
261 assert(num_apic_ids > 0);
262 *eax = CACHE_TYPE(cache->type) |
263 CACHE_LEVEL(cache->level) |
264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
265 ((num_cores - 1) << 26) |
266 ((num_apic_ids - 1) << 14);
267
268 assert(cache->line_size > 0);
269 assert(cache->partitions > 0);
270 assert(cache->associativity > 0);
271 /* We don't implement fully-associative caches */
272 assert(cache->associativity < cache->sets);
273 *ebx = (cache->line_size - 1) |
274 ((cache->partitions - 1) << 12) |
275 ((cache->associativity - 1) << 22);
276
277 assert(cache->sets > 0);
278 *ecx = cache->sets - 1;
279
280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
281 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
283 }
284
285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
287 {
288 assert(cache->size % 1024 == 0);
289 assert(cache->lines_per_tag > 0);
290 assert(cache->associativity > 0);
291 assert(cache->line_size > 0);
292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
293 (cache->lines_per_tag << 8) | (cache->line_size);
294 }
295
296 #define ASSOC_FULL 0xFF
297
298 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
299 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
300 a == 2 ? 0x2 : \
301 a == 4 ? 0x4 : \
302 a == 8 ? 0x6 : \
303 a == 16 ? 0x8 : \
304 a == 32 ? 0xA : \
305 a == 48 ? 0xB : \
306 a == 64 ? 0xC : \
307 a == 96 ? 0xD : \
308 a == 128 ? 0xE : \
309 a == ASSOC_FULL ? 0xF : \
310 0 /* invalid value */)
311
312 /*
313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
314 * @l3 can be NULL.
315 */
316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
317 CPUCacheInfo *l3,
318 uint32_t *ecx, uint32_t *edx)
319 {
320 assert(l2->size % 1024 == 0);
321 assert(l2->associativity > 0);
322 assert(l2->lines_per_tag > 0);
323 assert(l2->line_size > 0);
324 *ecx = ((l2->size / 1024) << 16) |
325 (AMD_ENC_ASSOC(l2->associativity) << 12) |
326 (l2->lines_per_tag << 8) | (l2->line_size);
327
328 if (l3) {
329 assert(l3->size % (512 * 1024) == 0);
330 assert(l3->associativity > 0);
331 assert(l3->lines_per_tag > 0);
332 assert(l3->line_size > 0);
333 *edx = ((l3->size / (512 * 1024)) << 18) |
334 (AMD_ENC_ASSOC(l3->associativity) << 12) |
335 (l3->lines_per_tag << 8) | (l3->line_size);
336 } else {
337 *edx = 0;
338 }
339 }
340
/*
 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
 * Please refer to the AMD64 Architecture Programmer's Manual Volume 3.
 * Define the constants to build the cpu topology. Right now, TOPOEXT
 * feature is enabled only on EPYC. So, these constants are based on
 * EPYC supported configurations. We may need to handle the cases if
 * these values change in future.
 */
/* Maximum core complexes in a node */
#define MAX_CCX 2
/* Maximum cores in a core complex */
#define MAX_CORES_IN_CCX 4
/* Maximum cores in a node */
#define MAX_CORES_IN_NODE 8
/* Maximum nodes in a socket */
#define MAX_NODES_PER_SOCKET 4

/*
 * Figure out the number of nodes required to hold @nr_cores cores,
 * with at most MAX_CORES_IN_NODE cores per node.
 */
static int nodes_in_socket(int nr_cores)
{
    /* Round up: a partially-filled node still counts as a node */
    int nodes = (nr_cores + MAX_CORES_IN_NODE - 1) / MAX_CORES_IN_NODE;

    /* Hardware does not support config with 3 nodes, return 4 in that case */
    if (nodes == 3) {
        nodes = 4;
    }
    return nodes;
}
371
372 /*
373 * Decide the number of cores in a core complex with the given nr_cores using
374 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
375 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
376 * L3 cache is shared across all cores in a core complex. So, this will also
377 * tell us how many cores are sharing the L3 cache.
378 */
379 static int cores_in_core_complex(int nr_cores)
380 {
381 int nodes;
382
383 /* Check if we can fit all the cores in one core complex */
384 if (nr_cores <= MAX_CORES_IN_CCX) {
385 return nr_cores;
386 }
387 /* Get the number of nodes required to build this config */
388 nodes = nodes_in_socket(nr_cores);
389
390 /*
391 * Divide the cores accros all the core complexes
392 * Return rounded up value
393 */
394 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
395 }
396
/*
 * Encode cache info for CPUID[8000001D].
 * EAX carries type/level/self-init plus the number of logical processors
 * sharing the cache (minus one, in bits 25:14); EBX/ECX carry the geometry
 * and EDX the behavior flags, mirroring the CPUID[4] layout.
 */
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    uint32_t l3_cores;
    /* Geometry must be internally consistent */
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
               (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);

    /* L3 is shared among multiple cores */
    if (cache->level == 3) {
        /* All threads of all cores in one core complex share the L3 */
        l3_cores = cores_in_core_complex(cs->nr_cores);
        *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
    } else {
        /* L1/L2 are shared only by the threads of one core */
        *eax |= ((cs->nr_threads - 1) << 14);
    }

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    /* EBX: line size, partitions, ways — each encoded as value - 1 */
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    /* ECX: number of sets - 1 */
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
433
/*
 * Data structure to hold the configuration info for a given core index.
 * Filled in by build_core_topology() and consumed when encoding
 * CPUID[8000001E].
 */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology
     * This can be 0,1,2,3 with max 4 cores in a core complex
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};
448
449 /*
450 * Build the configuration closely match the EPYC hardware. Using the EPYC
451 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
452 * right now. This could change in future.
453 * nr_cores : Total number of cores in the config
454 * core_id : Core index of the current CPU
455 * topo : Data structure to hold all the config info for this core index
456 */
457 static void build_core_topology(int nr_cores, int core_id,
458 struct core_topology *topo)
459 {
460 int nodes, cores_in_ccx;
461
462 /* First get the number of nodes required */
463 nodes = nodes_in_socket(nr_cores);
464
465 cores_in_ccx = cores_in_core_complex(nr_cores);
466
467 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
468 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
469 topo->core_id = core_id % cores_in_ccx;
470 topo->num_nodes = nodes;
471 }
472
/*
 * Encode topology info for CPUID[8000001E].
 * EAX is the APIC id; EBX packs threads-per-core and a composite core id;
 * ECX packs nodes-per-processor and a composite node id.
 */
static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
{
    struct core_topology topo = {0};
    unsigned long nodes;
    int shift;

    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
    *eax = cpu->apic_id;
    /*
     * CPUID_Fn8000001E_EBX
     * 31:16 Reserved
     * 15:8  Threads per core (The number of threads per core is
     *       Threads per core + 1)
     *  7:0  Core id (see bit decoding below)
     *       SMT:
     *           4:3 node id
     *             2 Core complex id
     *           1:0 Core id
     *       Non SMT:
     *           5:4 node id
     *             3 Core complex id
     *           1:0 Core id
     */
    if (cs->nr_threads - 1) {
        /* SMT: core id field uses the tighter bit layout */
        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
                (topo.ccx_id << 2) | topo.core_id;
    } else {
        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
    }
    /*
     * CPUID_Fn8000001E_ECX
     * 31:11 Reserved
     * 10:8  Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0  Node id (see bit decoding below)
     *         2  Socket id
     *       1:0  Node id
     */
    if (topo.num_nodes <= 4) {
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
                topo.node_id;
    } else {
        /*
         * Node id fix up. Actual hardware supports up to 4 nodes. But with
         * more than 32 cores, we may end up with more than 4 nodes.
         * Node id is a combination of socket id and node id. Only requirement
         * here is that this number should be unique across the system.
         * Shift the socket id to accommodate more nodes. We don't expect both
         * socket id and node id to be big numbers at the same time. This is
         * not an ideal config but we need to support it. Max nodes we can
         * have is 32 (255/8) with 8 cores per node and 255 max cores. We only
         * need 5 bits for nodes. Find the left most set bit to represent the
         * total number of nodes. find_last_bit returns last set bit(0 based).
         * Left shift(+1) the socket id to represent all the nodes.
         */
        nodes = topo.num_nodes - 1;
        shift = find_last_bit(&nodes, 8);
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
                topo.node_id;
    }
    *edx = 0;
}
537
538 /*
539 * Definitions of the hardcoded cache entries we expose:
540 * These are legacy cache values. If there is a need to change any
541 * of these values please use builtin_x86_defs
542 */
543
/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,        /* 64 sets * 8 ways * 64 B * 1 partition == 32 KiB */
    .partitions = 1,
    .no_invd_sharing = true,
};
556
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,       /* 512 sets * 2 ways * 64 B * 1 partition == 64 KiB */
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};
570
/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,        /* 64 sets * 8 ways * 64 B * 1 partition == 32 KiB */
    .partitions = 1,
    .no_invd_sharing = true,
};
583
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,       /* 512 sets * 2 ways * 64 B * 1 partition == 64 KiB */
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};
597
/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,      /* 4096 sets * 16 ways * 64 B * 1 partition == 4 MiB */
    .partitions = 1,
    .no_invd_sharing = true,
};
610
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
/*
 * NOTE(review): no sets/partitions/self_init here, so this entry only
 * carries enough information for the leaf 2 descriptor lookup; the
 * geometry assertion in encode_cache_cpuid4() would fail on it.
 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};
619
620
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,       /* 512 sets * 16 ways * 64 B * 1 partition == 512 KiB */
    .partitions = 1,
};
632
/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,     /* 16384 sets * 16 ways * 64 B * 1 partition == 16 MiB */
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};
647
648 /* TLB definitions: */
649
650 #define L1_DTLB_2M_ASSOC 1
651 #define L1_DTLB_2M_ENTRIES 255
652 #define L1_DTLB_4K_ASSOC 1
653 #define L1_DTLB_4K_ENTRIES 255
654
655 #define L1_ITLB_2M_ASSOC 1
656 #define L1_ITLB_2M_ENTRIES 255
657 #define L1_ITLB_4K_ASSOC 1
658 #define L1_ITLB_4K_ENTRIES 255
659
660 #define L2_DTLB_2M_ASSOC 0 /* disabled */
661 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
662 #define L2_DTLB_4K_ASSOC 4
663 #define L2_DTLB_4K_ENTRIES 512
664
665 #define L2_ITLB_2M_ASSOC 0 /* disabled */
666 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
667 #define L2_ITLB_4K_ASSOC 4
668 #define L2_ITLB_4K_ENTRIES 512
669
670 /* CPUID Leaf 0x14 constants: */
671 #define INTEL_PT_MAX_SUBLEAF 0x1
672 /*
673 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
674 * MSR can be accessed;
675 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
676 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
677 * of Intel PT MSRs across warm reset;
678 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
679 */
680 #define INTEL_PT_MINIMAL_EBX 0xf
681 /*
682 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
683 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
684 * accessed;
685 * bit[01]: ToPA tables can hold any number of output entries, up to the
686 * maximum allowed by the MaskOrTableOffset field of
687 * IA32_RTIT_OUTPUT_MASK_PTRS;
688 * bit[02]: Support Single-Range Output scheme;
689 */
690 #define INTEL_PT_MINIMAL_ECX 0x7
691 /* generated packets which contain IP payloads have LIP values */
692 #define INTEL_PT_IP_LIP (1 << 31)
693 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
694 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
695 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
696 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
697 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
698
699 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
700 uint32_t vendor2, uint32_t vendor3)
701 {
702 int i;
703 for (i = 0; i < 4; i++) {
704 dst[i] = vendor1 >> (8 * i);
705 dst[i + 4] = vendor2 >> (8 * i);
706 dst[i + 8] = vendor3 >> (8 * i);
707 }
708 dst[CPUID_VENDOR_SZ] = '\0';
709 }
710
711 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
712 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
713 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
714 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
715 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
716 CPUID_PSE36 | CPUID_FXSR)
717 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
718 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
719 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
720 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
721 CPUID_PAE | CPUID_SEP | CPUID_APIC)
722
723 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
724 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
725 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
726 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
727 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
728 /* partly implemented:
729 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
730 /* missing:
731 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
732 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
733 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
734 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
735 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
736 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
737 CPUID_EXT_RDRAND)
738 /* missing:
739 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
740 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
741 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
742 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
743 CPUID_EXT_F16C */
744
745 #ifdef TARGET_X86_64
746 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
747 #else
748 #define TCG_EXT2_X86_64_FEATURES 0
749 #endif
750
751 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
752 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
753 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
754 TCG_EXT2_X86_64_FEATURES)
755 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
756 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
757 #define TCG_EXT4_FEATURES 0
758 #define TCG_SVM_FEATURES CPUID_SVM_NPT
759 #define TCG_KVM_FEATURES 0
760 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
761 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
762 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
763 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
764 CPUID_7_0_EBX_ERMS)
765 /* missing:
766 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
767 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
768 CPUID_7_0_EBX_RDSEED */
769 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
770 /* CPUID_7_0_ECX_OSPKE is dynamic */ \
771 CPUID_7_0_ECX_LA57)
772 #define TCG_7_0_EDX_FEATURES 0
773 #define TCG_APM_FEATURES 0
774 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
775 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
776 /* missing:
777 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
778
/* How a feature word is enumerated: via a CPUID leaf or via an MSR. */
typedef enum FeatureWordType {
    CPUID_FEATURE_WORD,   /* word read from a CPUID leaf register */
    MSR_FEATURE_WORD,     /* word read from a feature-enumeration MSR */
} FeatureWordType;
783
/* Metadata describing one feature word: its names, source, and flag sets. */
typedef struct FeatureWordInfo {
    FeatureWordType type;   /* selects which union member below is valid */
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];   /* one name per bit; NULL = unnamed bit */
    union {
        /* If type==CPUID_FEATURE_WORD */
        struct {
            uint32_t eax;   /* Input EAX for CPUID */
            bool needs_ecx; /* CPUID instruction uses ECX as input */
            uint32_t ecx;   /* Input ECX value for CPUID */
            int reg;        /* output register (R_* constant) */
        } cpuid;
        /* If type==MSR_FEATURE_WORD */
        struct {
            uint32_t index;
            struct {   /*CPUID that enumerate this MSR*/
                FeatureWord cpuid_class;
                uint32_t    cpuid_flag;
            } cpuid_dep;
        } msr;
    };
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
} FeatureWordInfo;
815
816 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
817 [FEAT_1_EDX] = {
818 .type = CPUID_FEATURE_WORD,
819 .feat_names = {
820 "fpu", "vme", "de", "pse",
821 "tsc", "msr", "pae", "mce",
822 "cx8", "apic", NULL, "sep",
823 "mtrr", "pge", "mca", "cmov",
824 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
825 NULL, "ds" /* Intel dts */, "acpi", "mmx",
826 "fxsr", "sse", "sse2", "ss",
827 "ht" /* Intel htt */, "tm", "ia64", "pbe",
828 },
829 .cpuid = {.eax = 1, .reg = R_EDX, },
830 .tcg_features = TCG_FEATURES,
831 },
832 [FEAT_1_ECX] = {
833 .type = CPUID_FEATURE_WORD,
834 .feat_names = {
835 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
836 "ds-cpl", "vmx", "smx", "est",
837 "tm2", "ssse3", "cid", NULL,
838 "fma", "cx16", "xtpr", "pdcm",
839 NULL, "pcid", "dca", "sse4.1",
840 "sse4.2", "x2apic", "movbe", "popcnt",
841 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
842 "avx", "f16c", "rdrand", "hypervisor",
843 },
844 .cpuid = { .eax = 1, .reg = R_ECX, },
845 .tcg_features = TCG_EXT_FEATURES,
846 },
847 /* Feature names that are already defined on feature_name[] but
848 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
849 * names on feat_names below. They are copied automatically
850 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
851 */
852 [FEAT_8000_0001_EDX] = {
853 .type = CPUID_FEATURE_WORD,
854 .feat_names = {
855 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
856 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
857 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
858 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
859 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
860 "nx", NULL, "mmxext", NULL /* mmx */,
861 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
862 NULL, "lm", "3dnowext", "3dnow",
863 },
864 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
865 .tcg_features = TCG_EXT2_FEATURES,
866 },
867 [FEAT_8000_0001_ECX] = {
868 .type = CPUID_FEATURE_WORD,
869 .feat_names = {
870 "lahf-lm", "cmp-legacy", "svm", "extapic",
871 "cr8legacy", "abm", "sse4a", "misalignsse",
872 "3dnowprefetch", "osvw", "ibs", "xop",
873 "skinit", "wdt", NULL, "lwp",
874 "fma4", "tce", NULL, "nodeid-msr",
875 NULL, "tbm", "topoext", "perfctr-core",
876 "perfctr-nb", NULL, NULL, NULL,
877 NULL, NULL, NULL, NULL,
878 },
879 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
880 .tcg_features = TCG_EXT3_FEATURES,
881 /*
882 * TOPOEXT is always allowed but can't be enabled blindly by
883 * "-cpu host", as it requires consistent cache topology info
884 * to be provided so it doesn't confuse guests.
885 */
886 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
887 },
888 [FEAT_C000_0001_EDX] = {
889 .type = CPUID_FEATURE_WORD,
890 .feat_names = {
891 NULL, NULL, "xstore", "xstore-en",
892 NULL, NULL, "xcrypt", "xcrypt-en",
893 "ace2", "ace2-en", "phe", "phe-en",
894 "pmm", "pmm-en", NULL, NULL,
895 NULL, NULL, NULL, NULL,
896 NULL, NULL, NULL, NULL,
897 NULL, NULL, NULL, NULL,
898 NULL, NULL, NULL, NULL,
899 },
900 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
901 .tcg_features = TCG_EXT4_FEATURES,
902 },
903 [FEAT_KVM] = {
904 .type = CPUID_FEATURE_WORD,
905 .feat_names = {
906 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
907 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
908 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
909 NULL, NULL, NULL, NULL,
910 NULL, NULL, NULL, NULL,
911 NULL, NULL, NULL, NULL,
912 "kvmclock-stable-bit", NULL, NULL, NULL,
913 NULL, NULL, NULL, NULL,
914 },
915 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
916 .tcg_features = TCG_KVM_FEATURES,
917 },
918 [FEAT_KVM_HINTS] = {
919 .type = CPUID_FEATURE_WORD,
920 .feat_names = {
921 "kvm-hint-dedicated", NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 NULL, NULL, NULL, NULL,
924 NULL, NULL, NULL, NULL,
925 NULL, NULL, NULL, NULL,
926 NULL, NULL, NULL, NULL,
927 NULL, NULL, NULL, NULL,
928 NULL, NULL, NULL, NULL,
929 },
930 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
931 .tcg_features = TCG_KVM_FEATURES,
932 /*
933 * KVM hints aren't auto-enabled by -cpu host, they need to be
934 * explicitly enabled in the command-line.
935 */
936 .no_autoenable_flags = ~0U,
937 },
938 /*
939 * .feat_names are commented out for Hyper-V enlightenments because we
940 * don't want to have two different ways for enabling them on QEMU command
941 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
942 * enabling several feature bits simultaneously, exposing these bits
943 * individually may just confuse guests.
944 */
945 [FEAT_HYPERV_EAX] = {
946 .type = CPUID_FEATURE_WORD,
947 .feat_names = {
948 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
949 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
950 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
951 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
952 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
953 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
954 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
955 NULL, NULL,
956 NULL, NULL, NULL, NULL,
957 NULL, NULL, NULL, NULL,
958 NULL, NULL, NULL, NULL,
959 NULL, NULL, NULL, NULL,
960 },
961 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
962 },
963 [FEAT_HYPERV_EBX] = {
964 .type = CPUID_FEATURE_WORD,
965 .feat_names = {
966 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
967 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
968 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
969 NULL /* hv_create_port */, NULL /* hv_connect_port */,
970 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
971 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
972 NULL, NULL,
973 NULL, NULL, NULL, NULL,
974 NULL, NULL, NULL, NULL,
975 NULL, NULL, NULL, NULL,
976 NULL, NULL, NULL, NULL,
977 },
978 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
979 },
980 [FEAT_HYPERV_EDX] = {
981 .type = CPUID_FEATURE_WORD,
982 .feat_names = {
983 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
984 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
985 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
986 NULL, NULL,
987 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
988 NULL, NULL, NULL, NULL,
989 NULL, NULL, NULL, NULL,
990 NULL, NULL, NULL, NULL,
991 NULL, NULL, NULL, NULL,
992 NULL, NULL, NULL, NULL,
993 },
994 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
995 },
996 [FEAT_HV_RECOMM_EAX] = {
997 .type = CPUID_FEATURE_WORD,
998 .feat_names = {
999 NULL /* hv_recommend_pv_as_switch */,
1000 NULL /* hv_recommend_pv_tlbflush_local */,
1001 NULL /* hv_recommend_pv_tlbflush_remote */,
1002 NULL /* hv_recommend_msr_apic_access */,
1003 NULL /* hv_recommend_msr_reset */,
1004 NULL /* hv_recommend_relaxed_timing */,
1005 NULL /* hv_recommend_dma_remapping */,
1006 NULL /* hv_recommend_int_remapping */,
1007 NULL /* hv_recommend_x2apic_msrs */,
1008 NULL /* hv_recommend_autoeoi_deprecation */,
1009 NULL /* hv_recommend_pv_ipi */,
1010 NULL /* hv_recommend_ex_hypercalls */,
1011 NULL /* hv_hypervisor_is_nested */,
1012 NULL /* hv_recommend_int_mbec */,
1013 NULL /* hv_recommend_evmcs */,
1014 NULL,
1015 NULL, NULL, NULL, NULL,
1016 NULL, NULL, NULL, NULL,
1017 NULL, NULL, NULL, NULL,
1018 NULL, NULL, NULL, NULL,
1019 },
1020 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
1021 },
1022 [FEAT_HV_NESTED_EAX] = {
1023 .type = CPUID_FEATURE_WORD,
1024 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
1025 },
1026 [FEAT_SVM] = {
1027 .type = CPUID_FEATURE_WORD,
1028 .feat_names = {
1029 "npt", "lbrv", "svm-lock", "nrip-save",
1030 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1031 NULL, NULL, "pause-filter", NULL,
1032 "pfthreshold", NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL,
1034 NULL, NULL, NULL, NULL,
1035 NULL, NULL, NULL, NULL,
1036 NULL, NULL, NULL, NULL,
1037 },
1038 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
1039 .tcg_features = TCG_SVM_FEATURES,
1040 },
1041 [FEAT_7_0_EBX] = {
1042 .type = CPUID_FEATURE_WORD,
1043 .feat_names = {
1044 "fsgsbase", "tsc-adjust", NULL, "bmi1",
1045 "hle", "avx2", NULL, "smep",
1046 "bmi2", "erms", "invpcid", "rtm",
1047 NULL, NULL, "mpx", NULL,
1048 "avx512f", "avx512dq", "rdseed", "adx",
1049 "smap", "avx512ifma", "pcommit", "clflushopt",
1050 "clwb", "intel-pt", "avx512pf", "avx512er",
1051 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1052 },
1053 .cpuid = {
1054 .eax = 7,
1055 .needs_ecx = true, .ecx = 0,
1056 .reg = R_EBX,
1057 },
1058 .tcg_features = TCG_7_0_EBX_FEATURES,
1059 },
1060 [FEAT_7_0_ECX] = {
1061 .type = CPUID_FEATURE_WORD,
1062 .feat_names = {
1063 NULL, "avx512vbmi", "umip", "pku",
1064 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
1065 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1066 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
1067 "la57", NULL, NULL, NULL,
1068 NULL, NULL, "rdpid", NULL,
1069 NULL, "cldemote", NULL, "movdiri",
1070 "movdir64b", NULL, NULL, NULL,
1071 },
1072 .cpuid = {
1073 .eax = 7,
1074 .needs_ecx = true, .ecx = 0,
1075 .reg = R_ECX,
1076 },
1077 .tcg_features = TCG_7_0_ECX_FEATURES,
1078 },
1079 [FEAT_7_0_EDX] = {
1080 .type = CPUID_FEATURE_WORD,
1081 .feat_names = {
1082 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1083 NULL, NULL, NULL, NULL,
1084 NULL, NULL, "md-clear", NULL,
1085 NULL, NULL, NULL, NULL,
1086 NULL, NULL, NULL, NULL,
1087 NULL, NULL, NULL, NULL,
1088 NULL, NULL, "spec-ctrl", "stibp",
1089 NULL, "arch-capabilities", "core-capability", "ssbd",
1090 },
1091 .cpuid = {
1092 .eax = 7,
1093 .needs_ecx = true, .ecx = 0,
1094 .reg = R_EDX,
1095 },
1096 .tcg_features = TCG_7_0_EDX_FEATURES,
1097 },
1098 [FEAT_8000_0007_EDX] = {
1099 .type = CPUID_FEATURE_WORD,
1100 .feat_names = {
1101 NULL, NULL, NULL, NULL,
1102 NULL, NULL, NULL, NULL,
1103 "invtsc", NULL, NULL, NULL,
1104 NULL, NULL, NULL, NULL,
1105 NULL, NULL, NULL, NULL,
1106 NULL, NULL, NULL, NULL,
1107 NULL, NULL, NULL, NULL,
1108 NULL, NULL, NULL, NULL,
1109 },
1110 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1111 .tcg_features = TCG_APM_FEATURES,
1112 .unmigratable_flags = CPUID_APM_INVTSC,
1113 },
1114 [FEAT_8000_0008_EBX] = {
1115 .type = CPUID_FEATURE_WORD,
1116 .feat_names = {
1117 NULL, NULL, NULL, NULL,
1118 NULL, NULL, NULL, NULL,
1119 NULL, "wbnoinvd", NULL, NULL,
1120 "ibpb", NULL, NULL, NULL,
1121 NULL, NULL, NULL, NULL,
1122 NULL, NULL, NULL, NULL,
1123 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1124 NULL, NULL, NULL, NULL,
1125 },
1126 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1127 .tcg_features = 0,
1128 .unmigratable_flags = 0,
1129 },
1130 [FEAT_XSAVE] = {
1131 .type = CPUID_FEATURE_WORD,
1132 .feat_names = {
1133 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1134 NULL, NULL, NULL, NULL,
1135 NULL, NULL, NULL, NULL,
1136 NULL, NULL, NULL, NULL,
1137 NULL, NULL, NULL, NULL,
1138 NULL, NULL, NULL, NULL,
1139 NULL, NULL, NULL, NULL,
1140 NULL, NULL, NULL, NULL,
1141 },
1142 .cpuid = {
1143 .eax = 0xd,
1144 .needs_ecx = true, .ecx = 1,
1145 .reg = R_EAX,
1146 },
1147 .tcg_features = TCG_XSAVE_FEATURES,
1148 },
1149 [FEAT_6_EAX] = {
1150 .type = CPUID_FEATURE_WORD,
1151 .feat_names = {
1152 NULL, NULL, "arat", NULL,
1153 NULL, NULL, NULL, NULL,
1154 NULL, NULL, NULL, NULL,
1155 NULL, NULL, NULL, NULL,
1156 NULL, NULL, NULL, NULL,
1157 NULL, NULL, NULL, NULL,
1158 NULL, NULL, NULL, NULL,
1159 NULL, NULL, NULL, NULL,
1160 },
1161 .cpuid = { .eax = 6, .reg = R_EAX, },
1162 .tcg_features = TCG_6_EAX_FEATURES,
1163 },
1164 [FEAT_XSAVE_COMP_LO] = {
1165 .type = CPUID_FEATURE_WORD,
1166 .cpuid = {
1167 .eax = 0xD,
1168 .needs_ecx = true, .ecx = 0,
1169 .reg = R_EAX,
1170 },
1171 .tcg_features = ~0U,
1172 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1173 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1174 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1175 XSTATE_PKRU_MASK,
1176 },
1177 [FEAT_XSAVE_COMP_HI] = {
1178 .type = CPUID_FEATURE_WORD,
1179 .cpuid = {
1180 .eax = 0xD,
1181 .needs_ecx = true, .ecx = 0,
1182 .reg = R_EDX,
1183 },
1184 .tcg_features = ~0U,
1185 },
1186 /*Below are MSR exposed features*/
1187 [FEAT_ARCH_CAPABILITIES] = {
1188 .type = MSR_FEATURE_WORD,
1189 .feat_names = {
1190 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1191 "ssb-no", "mds-no", NULL, NULL,
1192 NULL, NULL, NULL, NULL,
1193 NULL, NULL, NULL, NULL,
1194 NULL, NULL, NULL, NULL,
1195 NULL, NULL, NULL, NULL,
1196 NULL, NULL, NULL, NULL,
1197 NULL, NULL, NULL, NULL,
1198 },
1199 .msr = {
1200 .index = MSR_IA32_ARCH_CAPABILITIES,
1201 .cpuid_dep = {
1202 FEAT_7_0_EDX,
1203 CPUID_7_0_EDX_ARCH_CAPABILITIES
1204 }
1205 },
1206 },
1207 [FEAT_CORE_CAPABILITY] = {
1208 .type = MSR_FEATURE_WORD,
1209 .feat_names = {
1210 NULL, NULL, NULL, NULL,
1211 NULL, "split-lock-detect", NULL, NULL,
1212 NULL, NULL, NULL, NULL,
1213 NULL, NULL, NULL, NULL,
1214 NULL, NULL, NULL, NULL,
1215 NULL, NULL, NULL, NULL,
1216 NULL, NULL, NULL, NULL,
1217 NULL, NULL, NULL, NULL,
1218 },
1219 .msr = {
1220 .index = MSR_IA32_CORE_CAPABILITY,
1221 .cpuid_dep = {
1222 FEAT_7_0_EDX,
1223 CPUID_7_0_EDX_CORE_CAPABILITY,
1224 },
1225 },
1226 },
1227 };
1228
/* Mapping between one 32-bit x86 register and its QAPI representation */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
1235
/* Build one x86_reg_info_32[] entry for register R_<reg> */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
/* Names and QAPI enum values of the 32-bit GPRs, indexed by the R_* constants */
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
1249
/* Description of one XSAVE state component */
typedef struct ExtSaveArea {
    /* feature word and feature bit(s) that enable this component */
    uint32_t feature, bits;
    /* location and length of the component within the XSAVE area */
    uint32_t offset, size;
} ExtSaveArea;
1254
/*
 * XSAVE state components, indexed by XSTATE_*_BIT component number:
 * which CPUID feature bit enables each component, plus its offset and
 * size within the XSAVE area layout (X86XSaveArea).
 */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
1299
1300 static uint32_t xsave_area_size(uint64_t mask)
1301 {
1302 int i;
1303 uint64_t ret = 0;
1304
1305 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1306 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1307 if ((mask >> i) & 1) {
1308 ret = MAX(ret, esa->offset + esa->size);
1309 }
1310 }
1311 return ret;
1312 }
1313
/*
 * Whether the selected accelerator is one that bases guest CPUID data
 * on the host CPU (KVM or HVF).
 */
static inline bool accel_uses_host_cpuid(void)
{
    if (kvm_enabled()) {
        return true;
    }
    return hvf_enabled();
}
1318
1319 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1320 {
1321 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1322 cpu->env.features[FEAT_XSAVE_COMP_LO];
1323 }
1324
1325 const char *get_register_name_32(unsigned int reg)
1326 {
1327 if (reg >= CPU_NB_REGS32) {
1328 return NULL;
1329 }
1330 return x86_reg_info_32[reg].name;
1331 }
1332
1333 /*
1334 * Returns the set of feature flags that are supported and migratable by
1335 * QEMU, for a given FeatureWord.
1336 */
1337 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1338 {
1339 FeatureWordInfo *wi = &feature_word_info[w];
1340 uint32_t r = 0;
1341 int i;
1342
1343 for (i = 0; i < 32; i++) {
1344 uint32_t f = 1U << i;
1345
1346 /* If the feature name is known, it is implicitly considered migratable,
1347 * unless it is explicitly set in unmigratable_flags */
1348 if ((wi->migratable_flags & f) ||
1349 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1350 r |= f;
1351 }
1352 }
1353 return r;
1354 }
1355
/*
 * Execute CPUID on the host with leaf @function and subleaf @count,
 * storing the results through @eax/@ebx/@ecx/@edx.  Any of the output
 * pointers may be NULL if the caller does not need that register.
 * Aborts when built for a non-x86 host.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /*
     * NOTE(review): on i386 all registers are saved/restored with
     * pusha/popa and results are stored through %esi, presumably so
     * that %ebx (which may be reserved, e.g. as the PIC register) never
     * appears in the constraint lists — confirm if touching this.
     */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    /* Copy out only the registers the caller asked for */
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
1389
/*
 * Query the host CPU's vendor string, family, model and stepping using
 * CPUID leaves 0 and 1.  @family, @model and @stepping may be NULL if
 * the caller is not interested in them.
 * NOTE(review): @vendor is filled by x86_cpu_vendor_words2str(); it is
 * assumed to have room for CPUID_VENDOR_SZ + 1 bytes — confirm against
 * callers.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        /* displayed family = base family (bits 11:8) + ext family (bits 27:20) */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        /* displayed model = (ext model (bits 19:16) << 4) | base model (bits 7:4) */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        /* stepping is bits 3:0 of CPUID[1].EAX */
        *stepping = eax & 0x0F;
    }
}
1408
1409 /* CPU class name definitions: */
1410
/* Return the QOM type name for a given CPU model name.
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
1418
1419 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1420 {
1421 ObjectClass *oc;
1422 char *typename = x86_cpu_type_name(cpu_model);
1423 oc = object_class_by_name(typename);
1424 g_free(typename);
1425 return oc;
1426 }
1427
1428 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1429 {
1430 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1431 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1432 return g_strndup(class_name,
1433 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1434 }
1435
/* Property name/value pair, used to override CPU properties */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1439
/* Definition of one versioned variant of a CPU model */
typedef struct X86CPUVersionDefinition {
    /* Version number; a list of versions is terminated by version == 0 */
    X86CPUVersion version;
    /* Optional alias model name for this version (may be NULL) */
    const char *alias;
    /* Property overrides applied for this version; terminated by empty entry */
    PropValue *props;
} X86CPUVersionDefinition;
1445
/* Base definition for a CPU model */
typedef struct X86CPUDefinition {
    /* CPU model name */
    const char *name;
    /* Maximum basic CPUID level */
    uint32_t level;
    /* Maximum extended CPUID level */
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    /* Family/model/stepping values reported by CPUID */
    int family;
    int model;
    int stepping;
    /* Initial value for each CPUID feature word */
    FeatureWordArray features;
    /* Human-readable model ID string */
    const char *model_id;
    /* Cache topology information (may be NULL) */
    CPUCaches *cache_info;
    /*
     * Definitions for alternative versions of CPU model.
     * List is terminated by item with version == 0.
     * If NULL, version 1 will be registered automatically.
     */
    const X86CPUVersionDefinition *versions;
} X86CPUDefinition;
1466
/* Reference to a specific version (cpudef + version number) of a CPU model */
struct X86CPUModel {
    /* Base CPU definition */
    X86CPUDefinition *cpudef;
    /* CPU model version */
    X86CPUVersion version;
};
1474
/* Get full model name for CPU version, formatted as "<name>-v<version>".
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_versioned_model_name(X86CPUDefinition *cpudef,
                                          X86CPUVersion version)
{
    /* version 0 is reserved as a version-list terminator, never a real version */
    assert(version > 0);
    return g_strdup_printf("%s-v%d", cpudef->name, (int)version);
}
1482
1483 static const X86CPUVersionDefinition *x86_cpu_def_get_versions(X86CPUDefinition *def)
1484 {
1485 /* When X86CPUDefinition::versions is NULL, we register only v1 */
1486 static const X86CPUVersionDefinition default_version_list[] = {
1487 { 1 },
1488 { /* end of list */ }
1489 };
1490
1491 return def->versions ?: default_version_list;
1492 }
1493
/*
 * Cache topology advertised by the EPYC CPU model:
 * 32 KiB 8-way L1D, 64 KiB 4-way L1I, 512 KiB 8-way L2, 8 MiB 16-way L3
 * (all with 64-byte lines, per the initializers below).
 */
static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 8192,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};
1543
1544 static X86CPUDefinition builtin_x86_defs[] = {
1545 {
1546 .name = "qemu64",
1547 .level = 0xd,
1548 .vendor = CPUID_VENDOR_AMD,
1549 .family = 6,
1550 .model = 6,
1551 .stepping = 3,
1552 .features[FEAT_1_EDX] =
1553 PPRO_FEATURES |
1554 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1555 CPUID_PSE36,
1556 .features[FEAT_1_ECX] =
1557 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1558 .features[FEAT_8000_0001_EDX] =
1559 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1560 .features[FEAT_8000_0001_ECX] =
1561 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1562 .xlevel = 0x8000000A,
1563 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1564 },
1565 {
1566 .name = "phenom",
1567 .level = 5,
1568 .vendor = CPUID_VENDOR_AMD,
1569 .family = 16,
1570 .model = 2,
1571 .stepping = 3,
1572 /* Missing: CPUID_HT */
1573 .features[FEAT_1_EDX] =
1574 PPRO_FEATURES |
1575 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1576 CPUID_PSE36 | CPUID_VME,
1577 .features[FEAT_1_ECX] =
1578 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1579 CPUID_EXT_POPCNT,
1580 .features[FEAT_8000_0001_EDX] =
1581 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1582 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1583 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1584 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1585 CPUID_EXT3_CR8LEG,
1586 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1587 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1588 .features[FEAT_8000_0001_ECX] =
1589 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1590 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1591 /* Missing: CPUID_SVM_LBRV */
1592 .features[FEAT_SVM] =
1593 CPUID_SVM_NPT,
1594 .xlevel = 0x8000001A,
1595 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1596 },
1597 {
1598 .name = "core2duo",
1599 .level = 10,
1600 .vendor = CPUID_VENDOR_INTEL,
1601 .family = 6,
1602 .model = 15,
1603 .stepping = 11,
1604 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1605 .features[FEAT_1_EDX] =
1606 PPRO_FEATURES |
1607 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1608 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1609 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1610 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1611 .features[FEAT_1_ECX] =
1612 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1613 CPUID_EXT_CX16,
1614 .features[FEAT_8000_0001_EDX] =
1615 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1616 .features[FEAT_8000_0001_ECX] =
1617 CPUID_EXT3_LAHF_LM,
1618 .xlevel = 0x80000008,
1619 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1620 },
1621 {
1622 .name = "kvm64",
1623 .level = 0xd,
1624 .vendor = CPUID_VENDOR_INTEL,
1625 .family = 15,
1626 .model = 6,
1627 .stepping = 1,
1628 /* Missing: CPUID_HT */
1629 .features[FEAT_1_EDX] =
1630 PPRO_FEATURES | CPUID_VME |
1631 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1632 CPUID_PSE36,
1633 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1634 .features[FEAT_1_ECX] =
1635 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1636 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1637 .features[FEAT_8000_0001_EDX] =
1638 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1639 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1640 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1641 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1642 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1643 .features[FEAT_8000_0001_ECX] =
1644 0,
1645 .xlevel = 0x80000008,
1646 .model_id = "Common KVM processor"
1647 },
1648 {
1649 .name = "qemu32",
1650 .level = 4,
1651 .vendor = CPUID_VENDOR_INTEL,
1652 .family = 6,
1653 .model = 6,
1654 .stepping = 3,
1655 .features[FEAT_1_EDX] =
1656 PPRO_FEATURES,
1657 .features[FEAT_1_ECX] =
1658 CPUID_EXT_SSE3,
1659 .xlevel = 0x80000004,
1660 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1661 },
1662 {
1663 .name = "kvm32",
1664 .level = 5,
1665 .vendor = CPUID_VENDOR_INTEL,
1666 .family = 15,
1667 .model = 6,
1668 .stepping = 1,
1669 .features[FEAT_1_EDX] =
1670 PPRO_FEATURES | CPUID_VME |
1671 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1672 .features[FEAT_1_ECX] =
1673 CPUID_EXT_SSE3,
1674 .features[FEAT_8000_0001_ECX] =
1675 0,
1676 .xlevel = 0x80000008,
1677 .model_id = "Common 32-bit KVM processor"
1678 },
1679 {
1680 .name = "coreduo",
1681 .level = 10,
1682 .vendor = CPUID_VENDOR_INTEL,
1683 .family = 6,
1684 .model = 14,
1685 .stepping = 8,
1686 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1687 .features[FEAT_1_EDX] =
1688 PPRO_FEATURES | CPUID_VME |
1689 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1690 CPUID_SS,
1691 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1692 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1693 .features[FEAT_1_ECX] =
1694 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1695 .features[FEAT_8000_0001_EDX] =
1696 CPUID_EXT2_NX,
1697 .xlevel = 0x80000008,
1698 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1699 },
1700 {
1701 .name = "486",
1702 .level = 1,
1703 .vendor = CPUID_VENDOR_INTEL,
1704 .family = 4,
1705 .model = 8,
1706 .stepping = 0,
1707 .features[FEAT_1_EDX] =
1708 I486_FEATURES,
1709 .xlevel = 0,
1710 .model_id = "",
1711 },
1712 {
1713 .name = "pentium",
1714 .level = 1,
1715 .vendor = CPUID_VENDOR_INTEL,
1716 .family = 5,
1717 .model = 4,
1718 .stepping = 3,
1719 .features[FEAT_1_EDX] =
1720 PENTIUM_FEATURES,
1721 .xlevel = 0,
1722 .model_id = "",
1723 },
1724 {
1725 .name = "pentium2",
1726 .level = 2,
1727 .vendor = CPUID_VENDOR_INTEL,
1728 .family = 6,
1729 .model = 5,
1730 .stepping = 2,
1731 .features[FEAT_1_EDX] =
1732 PENTIUM2_FEATURES,
1733 .xlevel = 0,
1734 .model_id = "",
1735 },
1736 {
1737 .name = "pentium3",
1738 .level = 3,
1739 .vendor = CPUID_VENDOR_INTEL,
1740 .family = 6,
1741 .model = 7,
1742 .stepping = 3,
1743 .features[FEAT_1_EDX] =
1744 PENTIUM3_FEATURES,
1745 .xlevel = 0,
1746 .model_id = "",
1747 },
1748 {
1749 .name = "athlon",
1750 .level = 2,
1751 .vendor = CPUID_VENDOR_AMD,
1752 .family = 6,
1753 .model = 2,
1754 .stepping = 3,
1755 .features[FEAT_1_EDX] =
1756 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1757 CPUID_MCA,
1758 .features[FEAT_8000_0001_EDX] =
1759 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1760 .xlevel = 0x80000008,
1761 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1762 },
1763 {
1764 .name = "n270",
1765 .level = 10,
1766 .vendor = CPUID_VENDOR_INTEL,
1767 .family = 6,
1768 .model = 28,
1769 .stepping = 2,
1770 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1771 .features[FEAT_1_EDX] =
1772 PPRO_FEATURES |
1773 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1774 CPUID_ACPI | CPUID_SS,
1775 /* Some CPUs got no CPUID_SEP */
1776 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1777 * CPUID_EXT_XTPR */
1778 .features[FEAT_1_ECX] =
1779 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1780 CPUID_EXT_MOVBE,
1781 .features[FEAT_8000_0001_EDX] =
1782 CPUID_EXT2_NX,
1783 .features[FEAT_8000_0001_ECX] =
1784 CPUID_EXT3_LAHF_LM,
1785 .xlevel = 0x80000008,
1786 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1787 },
1788 {
1789 .name = "Conroe",
1790 .level = 10,
1791 .vendor = CPUID_VENDOR_INTEL,
1792 .family = 6,
1793 .model = 15,
1794 .stepping = 3,
1795 .features[FEAT_1_EDX] =
1796 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1797 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1798 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1799 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1800 CPUID_DE | CPUID_FP87,
1801 .features[FEAT_1_ECX] =
1802 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1803 .features[FEAT_8000_0001_EDX] =
1804 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1805 .features[FEAT_8000_0001_ECX] =
1806 CPUID_EXT3_LAHF_LM,
1807 .xlevel = 0x80000008,
1808 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1809 },
1810 {
1811 .name = "Penryn",
1812 .level = 10,
1813 .vendor = CPUID_VENDOR_INTEL,
1814 .family = 6,
1815 .model = 23,
1816 .stepping = 3,
1817 .features[FEAT_1_EDX] =
1818 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1819 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1820 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1821 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1822 CPUID_DE | CPUID_FP87,
1823 .features[FEAT_1_ECX] =
1824 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1825 CPUID_EXT_SSE3,
1826 .features[FEAT_8000_0001_EDX] =
1827 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1828 .features[FEAT_8000_0001_ECX] =
1829 CPUID_EXT3_LAHF_LM,
1830 .xlevel = 0x80000008,
1831 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1832 },
1833 {
1834 .name = "Nehalem",
1835 .level = 11,
1836 .vendor = CPUID_VENDOR_INTEL,
1837 .family = 6,
1838 .model = 26,
1839 .stepping = 3,
1840 .features[FEAT_1_EDX] =
1841 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1842 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1843 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1844 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1845 CPUID_DE | CPUID_FP87,
1846 .features[FEAT_1_ECX] =
1847 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1848 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1849 .features[FEAT_8000_0001_EDX] =
1850 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1851 .features[FEAT_8000_0001_ECX] =
1852 CPUID_EXT3_LAHF_LM,
1853 .xlevel = 0x80000008,
1854 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1855 .versions = (X86CPUVersionDefinition[]) {
1856 { .version = 1 },
1857 {
1858 .version = 2,
1859 .alias = "Nehalem-IBRS",
1860 .props = (PropValue[]) {
1861 { "spec-ctrl", "on" },
1862 { "model-id",
1863 "Intel Core i7 9xx (Nehalem Core i7, IBRS update)" },
1864 { /* end of list */ }
1865 }
1866 },
1867 { /* end of list */ }
1868 }
1869 },
1870 {
1871 .name = "Westmere",
1872 .level = 11,
1873 .vendor = CPUID_VENDOR_INTEL,
1874 .family = 6,
1875 .model = 44,
1876 .stepping = 1,
1877 .features[FEAT_1_EDX] =
1878 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1879 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1880 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1881 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1882 CPUID_DE | CPUID_FP87,
1883 .features[FEAT_1_ECX] =
1884 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1885 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1886 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1887 .features[FEAT_8000_0001_EDX] =
1888 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1889 .features[FEAT_8000_0001_ECX] =
1890 CPUID_EXT3_LAHF_LM,
1891 .features[FEAT_6_EAX] =
1892 CPUID_6_EAX_ARAT,
1893 .xlevel = 0x80000008,
1894 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1895 .versions = (X86CPUVersionDefinition[]) {
1896 { .version = 1 },
1897 {
1898 .version = 2,
1899 .alias = "Westmere-IBRS",
1900 .props = (PropValue[]) {
1901 { "spec-ctrl", "on" },
1902 { "model-id",
1903 "Westmere E56xx/L56xx/X56xx (IBRS update)" },
1904 { /* end of list */ }
1905 }
1906 },
1907 { /* end of list */ }
1908 }
1909 },
1910 {
1911 .name = "SandyBridge",
1912 .level = 0xd,
1913 .vendor = CPUID_VENDOR_INTEL,
1914 .family = 6,
1915 .model = 42,
1916 .stepping = 1,
1917 .features[FEAT_1_EDX] =
1918 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1919 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1920 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1921 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1922 CPUID_DE | CPUID_FP87,
1923 .features[FEAT_1_ECX] =
1924 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1925 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1926 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1927 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1928 CPUID_EXT_SSE3,
1929 .features[FEAT_8000_0001_EDX] =
1930 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1931 CPUID_EXT2_SYSCALL,
1932 .features[FEAT_8000_0001_ECX] =
1933 CPUID_EXT3_LAHF_LM,
1934 .features[FEAT_XSAVE] =
1935 CPUID_XSAVE_XSAVEOPT,
1936 .features[FEAT_6_EAX] =
1937 CPUID_6_EAX_ARAT,
1938 .xlevel = 0x80000008,
1939 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1940 .versions = (X86CPUVersionDefinition[]) {
1941 { .version = 1 },
1942 {
1943 .version = 2,
1944 .alias = "SandyBridge-IBRS",
1945 .props = (PropValue[]) {
1946 { "spec-ctrl", "on" },
1947 { "model-id",
1948 "Intel Xeon E312xx (Sandy Bridge, IBRS update)" },
1949 { /* end of list */ }
1950 }
1951 },
1952 { /* end of list */ }
1953 }
1954 },
1955 {
1956 .name = "IvyBridge",
1957 .level = 0xd,
1958 .vendor = CPUID_VENDOR_INTEL,
1959 .family = 6,
1960 .model = 58,
1961 .stepping = 9,
1962 .features[FEAT_1_EDX] =
1963 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1964 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1965 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1966 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1967 CPUID_DE | CPUID_FP87,
1968 .features[FEAT_1_ECX] =
1969 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1970 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1971 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1972 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1973 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1974 .features[FEAT_7_0_EBX] =
1975 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1976 CPUID_7_0_EBX_ERMS,
1977 .features[FEAT_8000_0001_EDX] =
1978 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1979 CPUID_EXT2_SYSCALL,
1980 .features[FEAT_8000_0001_ECX] =
1981 CPUID_EXT3_LAHF_LM,
1982 .features[FEAT_XSAVE] =
1983 CPUID_XSAVE_XSAVEOPT,
1984 .features[FEAT_6_EAX] =
1985 CPUID_6_EAX_ARAT,
1986 .xlevel = 0x80000008,
1987 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1988 .versions = (X86CPUVersionDefinition[]) {
1989 { .version = 1 },
1990 {
1991 .version = 2,
1992 .alias = "IvyBridge-IBRS",
1993 .props = (PropValue[]) {
1994 { "spec-ctrl", "on" },
1995 { "model-id",
1996 "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)" },
1997 { /* end of list */ }
1998 }
1999 },
2000 { /* end of list */ }
2001 }
2002 },
2003 {
2004 .name = "Haswell",
2005 .level = 0xd,
2006 .vendor = CPUID_VENDOR_INTEL,
2007 .family = 6,
2008 .model = 60,
2009 .stepping = 4,
2010 .features[FEAT_1_EDX] =
2011 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2012 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2013 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2014 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2015 CPUID_DE | CPUID_FP87,
2016 .features[FEAT_1_ECX] =
2017 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2018 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2019 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2020 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2021 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2022 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2023 .features[FEAT_8000_0001_EDX] =
2024 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2025 CPUID_EXT2_SYSCALL,
2026 .features[FEAT_8000_0001_ECX] =
2027 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2028 .features[FEAT_7_0_EBX] =
2029 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2030 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2031 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2032 CPUID_7_0_EBX_RTM,
2033 .features[FEAT_XSAVE] =
2034 CPUID_XSAVE_XSAVEOPT,
2035 .features[FEAT_6_EAX] =
2036 CPUID_6_EAX_ARAT,
2037 .xlevel = 0x80000008,
2038 .model_id = "Intel Core Processor (Haswell)",
2039 .versions = (X86CPUVersionDefinition[]) {
2040 { .version = 1 },
2041 {
2042 .version = 2,
2043 .alias = "Haswell-noTSX",
2044 .props = (PropValue[]) {
2045 { "hle", "off" },
2046 { "rtm", "off" },
2047 { "stepping", "1" },
2048 { "model-id", "Intel Core Processor (Haswell, no TSX)", },
2049 { /* end of list */ }
2050 },
2051 },
2052 {
2053 .version = 3,
2054 .alias = "Haswell-IBRS",
2055 .props = (PropValue[]) {
2056 /* Restore TSX features removed by -v2 above */
2057 { "hle", "on" },
2058 { "rtm", "on" },
2059 /*
2060 * Haswell and Haswell-IBRS had stepping=4 in
2061 * QEMU 4.0 and older
2062 */
2063 { "stepping", "4" },
2064 { "spec-ctrl", "on" },
2065 { "model-id",
2066 "Intel Core Processor (Haswell, IBRS)" },
2067 { /* end of list */ }
2068 }
2069 },
2070 {
2071 .version = 4,
2072 .alias = "Haswell-noTSX-IBRS",
2073 .props = (PropValue[]) {
2074 { "hle", "off" },
2075 { "rtm", "off" },
2076 /* spec-ctrl was already enabled by -v3 above */
2077 { "stepping", "1" },
2078 { "model-id",
2079 "Intel Core Processor (Haswell, no TSX, IBRS)" },
2080 { /* end of list */ }
2081 }
2082 },
2083 { /* end of list */ }
2084 }
2085 },
2086 {
2087 .name = "Broadwell",
2088 .level = 0xd,
2089 .vendor = CPUID_VENDOR_INTEL,
2090 .family = 6,
2091 .model = 61,
2092 .stepping = 2,
2093 .features[FEAT_1_EDX] =
2094 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2095 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2096 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2097 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2098 CPUID_DE | CPUID_FP87,
2099 .features[FEAT_1_ECX] =
2100 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2101 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2102 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2103 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2104 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2105 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2106 .features[FEAT_8000_0001_EDX] =
2107 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2108 CPUID_EXT2_SYSCALL,
2109 .features[FEAT_8000_0001_ECX] =
2110 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2111 .features[FEAT_7_0_EBX] =
2112 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2113 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2114 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2115 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2116 CPUID_7_0_EBX_SMAP,
2117 .features[FEAT_XSAVE] =
2118 CPUID_XSAVE_XSAVEOPT,
2119 .features[FEAT_6_EAX] =
2120 CPUID_6_EAX_ARAT,
2121 .xlevel = 0x80000008,
2122 .model_id = "Intel Core Processor (Broadwell)",
2123 .versions = (X86CPUVersionDefinition[]) {
2124 { .version = 1 },
2125 {
2126 .version = 2,
2127 .alias = "Broadwell-noTSX",
2128 .props = (PropValue[]) {
2129 { "hle", "off" },
2130 { "rtm", "off" },
2131 { "model-id", "Intel Core Processor (Broadwell, no TSX)", },
2132 { /* end of list */ }
2133 },
2134 },
2135 {
2136 .version = 3,
2137 .alias = "Broadwell-IBRS",
2138 .props = (PropValue[]) {
2139 /* Restore TSX features removed by -v2 above */
2140 { "hle", "on" },
2141 { "rtm", "on" },
2142 { "spec-ctrl", "on" },
2143 { "model-id",
2144 "Intel Core Processor (Broadwell, IBRS)" },
2145 { /* end of list */ }
2146 }
2147 },
2148 {
2149 .version = 4,
2150 .alias = "Broadwell-noTSX-IBRS",
2151 .props = (PropValue[]) {
2152 { "hle", "off" },
2153 { "rtm", "off" },
2154 /* spec-ctrl was already enabled by -v3 above */
2155 { "model-id",
2156 "Intel Core Processor (Broadwell, no TSX, IBRS)" },
2157 { /* end of list */ }
2158 }
2159 },
2160 { /* end of list */ }
2161 }
2162 },
2163 {
2164 .name = "Skylake-Client",
2165 .level = 0xd,
2166 .vendor = CPUID_VENDOR_INTEL,
2167 .family = 6,
2168 .model = 94,
2169 .stepping = 3,
2170 .features[FEAT_1_EDX] =
2171 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2172 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2173 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2174 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2175 CPUID_DE | CPUID_FP87,
2176 .features[FEAT_1_ECX] =
2177 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2178 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2179 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2180 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2181 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2182 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2183 .features[FEAT_8000_0001_EDX] =
2184 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2185 CPUID_EXT2_SYSCALL,
2186 .features[FEAT_8000_0001_ECX] =
2187 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2188 .features[FEAT_7_0_EBX] =
2189 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2190 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2191 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2192 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2193 CPUID_7_0_EBX_SMAP,
2194 /* Missing: XSAVES (not supported by some Linux versions,
2195 * including v4.1 to v4.12).
2196 * KVM doesn't yet expose any XSAVES state save component,
2197 * and the only one defined in Skylake (processor tracing)
2198 * probably will block migration anyway.
2199 */
2200 .features[FEAT_XSAVE] =
2201 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2202 CPUID_XSAVE_XGETBV1,
2203 .features[FEAT_6_EAX] =
2204 CPUID_6_EAX_ARAT,
2205 .xlevel = 0x80000008,
2206 .model_id = "Intel Core Processor (Skylake)",
2207 .versions = (X86CPUVersionDefinition[]) {
2208 { .version = 1 },
2209 {
2210 .version = 2,
2211 .alias = "Skylake-Client-IBRS",
2212 .props = (PropValue[]) {
2213 { "spec-ctrl", "on" },
2214 { "model-id",
2215 "Intel Core Processor (Skylake, IBRS)" },
2216 { /* end of list */ }
2217 }
2218 },
2219 { /* end of list */ }
2220 }
2221 },
2222 {
2223 .name = "Skylake-Server",
2224 .level = 0xd,
2225 .vendor = CPUID_VENDOR_INTEL,
2226 .family = 6,
2227 .model = 85,
2228 .stepping = 4,
2229 .features[FEAT_1_EDX] =
2230 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2231 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2232 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2233 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2234 CPUID_DE | CPUID_FP87,
2235 .features[FEAT_1_ECX] =
2236 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2237 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2238 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2239 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2240 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2241 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2242 .features[FEAT_8000_0001_EDX] =
2243 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2244 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2245 .features[FEAT_8000_0001_ECX] =
2246 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2247 .features[FEAT_7_0_EBX] =
2248 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2249 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2250 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2251 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2252 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2253 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2254 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2255 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2256 .features[FEAT_7_0_ECX] =
2257 CPUID_7_0_ECX_PKU,
2258 /* Missing: XSAVES (not supported by some Linux versions,
2259 * including v4.1 to v4.12).
2260 * KVM doesn't yet expose any XSAVES state save component,
2261 * and the only one defined in Skylake (processor tracing)
2262 * probably will block migration anyway.
2263 */
2264 .features[FEAT_XSAVE] =
2265 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2266 CPUID_XSAVE_XGETBV1,
2267 .features[FEAT_6_EAX] =
2268 CPUID_6_EAX_ARAT,
2269 .xlevel = 0x80000008,
2270 .model_id = "Intel Xeon Processor (Skylake)",
2271 .versions = (X86CPUVersionDefinition[]) {
2272 { .version = 1 },
2273 {
2274 .version = 2,
2275 .alias = "Skylake-Server-IBRS",
2276 .props = (PropValue[]) {
2277 /* clflushopt was not added to Skylake-Server-IBRS */
2278 /* TODO: add -v3 including clflushopt */
2279 { "clflushopt", "off" },
2280 { "spec-ctrl", "on" },
2281 { "model-id",
2282 "Intel Xeon Processor (Skylake, IBRS)" },
2283 { /* end of list */ }
2284 }
2285 },
2286 { /* end of list */ }
2287 }
2288 },
2289 {
2290 .name = "Cascadelake-Server",
2291 .level = 0xd,
2292 .vendor = CPUID_VENDOR_INTEL,
2293 .family = 6,
2294 .model = 85,
2295 .stepping = 6,
2296 .features[FEAT_1_EDX] =
2297 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2298 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2299 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2300 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2301 CPUID_DE | CPUID_FP87,
2302 .features[FEAT_1_ECX] =
2303 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2304 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2305 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2306 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2307 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2308 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2309 .features[FEAT_8000_0001_EDX] =
2310 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2311 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2312 .features[FEAT_8000_0001_ECX] =
2313 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2314 .features[FEAT_7_0_EBX] =
2315 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2316 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2317 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2318 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2319 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2320 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2321 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2322 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2323 .features[FEAT_7_0_ECX] =
2324 CPUID_7_0_ECX_PKU |
2325 CPUID_7_0_ECX_AVX512VNNI,
2326 .features[FEAT_7_0_EDX] =
2327 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2328 /* Missing: XSAVES (not supported by some Linux versions,
2329 * including v4.1 to v4.12).
2330 * KVM doesn't yet expose any XSAVES state save component,
2331 * and the only one defined in Skylake (processor tracing)
2332 * probably will block migration anyway.
2333 */
2334 .features[FEAT_XSAVE] =
2335 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2336 CPUID_XSAVE_XGETBV1,
2337 .features[FEAT_6_EAX] =
2338 CPUID_6_EAX_ARAT,
2339 .xlevel = 0x80000008,
2340 .model_id = "Intel Xeon Processor (Cascadelake)",
2341 },
2342 {
2343 .name = "Icelake-Client",
2344 .level = 0xd,
2345 .vendor = CPUID_VENDOR_INTEL,
2346 .family = 6,
2347 .model = 126,
2348 .stepping = 0,
2349 .features[FEAT_1_EDX] =
2350 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2351 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2352 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2353 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2354 CPUID_DE | CPUID_FP87,
2355 .features[FEAT_1_ECX] =
2356 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2357 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2358 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2359 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2360 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2361 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2362 .features[FEAT_8000_0001_EDX] =
2363 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2364 CPUID_EXT2_SYSCALL,
2365 .features[FEAT_8000_0001_ECX] =
2366 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2367 .features[FEAT_8000_0008_EBX] =
2368 CPUID_8000_0008_EBX_WBNOINVD,
2369 .features[FEAT_7_0_EBX] =
2370 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2371 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2372 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2373 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2374 CPUID_7_0_EBX_SMAP,
2375 .features[FEAT_7_0_ECX] =
2376 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2377 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2378 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2379 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2380 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2381 .features[FEAT_7_0_EDX] =
2382 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2383 /* Missing: XSAVES (not supported by some Linux versions,
2384 * including v4.1 to v4.12).
2385 * KVM doesn't yet expose any XSAVES state save component,
2386 * and the only one defined in Skylake (processor tracing)
2387 * probably will block migration anyway.
2388 */
2389 .features[FEAT_XSAVE] =
2390 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2391 CPUID_XSAVE_XGETBV1,
2392 .features[FEAT_6_EAX] =
2393 CPUID_6_EAX_ARAT,
2394 .xlevel = 0x80000008,
2395 .model_id = "Intel Core Processor (Icelake)",
2396 },
2397 {
2398 .name = "Icelake-Server",
2399 .level = 0xd,
2400 .vendor = CPUID_VENDOR_INTEL,
2401 .family = 6,
2402 .model = 134,
2403 .stepping = 0,
2404 .features[FEAT_1_EDX] =
2405 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2406 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2407 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2408 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2409 CPUID_DE | CPUID_FP87,
2410 .features[FEAT_1_ECX] =
2411 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2412 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2413 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2414 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2415 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2416 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2417 .features[FEAT_8000_0001_EDX] =
2418 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2419 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2420 .features[FEAT_8000_0001_ECX] =
2421 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2422 .features[FEAT_8000_0008_EBX] =
2423 CPUID_8000_0008_EBX_WBNOINVD,
2424 .features[FEAT_7_0_EBX] =
2425 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2426 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2427 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2428 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2429 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2430 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2431 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2432 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2433 .features[FEAT_7_0_ECX] =
2434 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2435 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2436 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2437 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2438 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
2439 .features[FEAT_7_0_EDX] =
2440 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2441 /* Missing: XSAVES (not supported by some Linux versions,
2442 * including v4.1 to v4.12).
2443 * KVM doesn't yet expose any XSAVES state save component,
2444 * and the only one defined in Skylake (processor tracing)
2445 * probably will block migration anyway.
2446 */
2447 .features[FEAT_XSAVE] =
2448 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2449 CPUID_XSAVE_XGETBV1,
2450 .features[FEAT_6_EAX] =
2451 CPUID_6_EAX_ARAT,
2452 .xlevel = 0x80000008,
2453 .model_id = "Intel Xeon Processor (Icelake)",
2454 },
2455 {
2456 .name = "SnowRidge-Server",
2457 .level = 27,
2458 .vendor = CPUID_VENDOR_INTEL,
2459 .family = 6,
2460 .model = 134,
2461 .stepping = 1,
2462 .features[FEAT_1_EDX] =
2463 /* missing: CPUID_PN CPUID_IA64 */
2464 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2465 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE |
2466 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE |
2467 CPUID_CX8 | CPUID_APIC | CPUID_SEP |
2468 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
2469 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH |
2470 CPUID_MMX |
2471 CPUID_FXSR | CPUID_SSE | CPUID_SSE2,
2472 .features[FEAT_1_ECX] =
2473 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
2474 CPUID_EXT_VMX |
2475 CPUID_EXT_SSSE3 |
2476 CPUID_EXT_CX16 |
2477 CPUID_EXT_SSE41 |
2478 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
2479 CPUID_EXT_POPCNT |
2480 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE |
2481 CPUID_EXT_RDRAND,
2482 .features[FEAT_8000_0001_EDX] =
2483 CPUID_EXT2_SYSCALL |
2484 CPUID_EXT2_NX |
2485 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2486 CPUID_EXT2_LM,
2487 .features[FEAT_8000_0001_ECX] =
2488 CPUID_EXT3_LAHF_LM |
2489 CPUID_EXT3_3DNOWPREFETCH,
2490 .features[FEAT_7_0_EBX] =
2491 CPUID_7_0_EBX_FSGSBASE |
2492 CPUID_7_0_EBX_SMEP |
2493 CPUID_7_0_EBX_ERMS |
2494 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */
2495 CPUID_7_0_EBX_RDSEED |
2496 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2497 CPUID_7_0_EBX_CLWB |
2498 CPUID_7_0_EBX_SHA_NI,
2499 .features[FEAT_7_0_ECX] =
2500 CPUID_7_0_ECX_UMIP |
2501 /* missing bit 5 */
2502 CPUID_7_0_ECX_GFNI |
2503 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE |
2504 CPUID_7_0_ECX_MOVDIR64B,
2505 .features[FEAT_7_0_EDX] =
2506 CPUID_7_0_EDX_SPEC_CTRL |
2507 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD |
2508 CPUID_7_0_EDX_CORE_CAPABILITY,
2509 .features[FEAT_CORE_CAPABILITY] =
2510 MSR_CORE_CAP_SPLIT_LOCK_DETECT,
2511 /*
2512 * Missing: XSAVES (not supported by some Linux versions,
2513 * including v4.1 to v4.12).
2514 * KVM doesn't yet expose any XSAVES state save component,
2515 * and the only one defined in Skylake (processor tracing)
2516 * probably will block migration anyway.
2517 */
2518 .features[FEAT_XSAVE] =
2519 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2520 CPUID_XSAVE_XGETBV1,
2521 .features[FEAT_6_EAX] =
2522 CPUID_6_EAX_ARAT,
2523 .xlevel = 0x80000008,
2524 .model_id = "Intel Atom Processor (SnowRidge)",
2525 },
2526 {
2527 .name = "KnightsMill",
2528 .level = 0xd,
2529 .vendor = CPUID_VENDOR_INTEL,
2530 .family = 6,
2531 .model = 133,
2532 .stepping = 0,
2533 .features[FEAT_1_EDX] =
2534 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2535 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2536 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2537 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2538 CPUID_PSE | CPUID_DE | CPUID_FP87,
2539 .features[FEAT_1_ECX] =
2540 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2541 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2542 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2543 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2544 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2545 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2546 .features[FEAT_8000_0001_EDX] =
2547 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2548 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2549 .features[FEAT_8000_0001_ECX] =
2550 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2551 .features[FEAT_7_0_EBX] =
2552 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2553 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2554 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2555 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2556 CPUID_7_0_EBX_AVX512ER,
2557 .features[FEAT_7_0_ECX] =
2558 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2559 .features[FEAT_7_0_EDX] =
2560 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2561 .features[FEAT_XSAVE] =
2562 CPUID_XSAVE_XSAVEOPT,
2563 .features[FEAT_6_EAX] =
2564 CPUID_6_EAX_ARAT,
2565 .xlevel = 0x80000008,
2566 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2567 },
2568 {
2569 .name = "Opteron_G1",
2570 .level = 5,
2571 .vendor = CPUID_VENDOR_AMD,
2572 .family = 15,
2573 .model = 6,
2574 .stepping = 1,
2575 .features[FEAT_1_EDX] =
2576 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2577 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2578 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2579 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2580 CPUID_DE | CPUID_FP87,
2581 .features[FEAT_1_ECX] =
2582 CPUID_EXT_SSE3,
2583 .features[FEAT_8000_0001_EDX] =
2584 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2585 .xlevel = 0x80000008,
2586 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2587 },
2588 {
2589 .name = "Opteron_G2",
2590 .level = 5,
2591 .vendor = CPUID_VENDOR_AMD,
2592 .family = 15,
2593 .model = 6,
2594 .stepping = 1,
2595 .features[FEAT_1_EDX] =
2596 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2597 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2598 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2599 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2600 CPUID_DE | CPUID_FP87,
2601 .features[FEAT_1_ECX] =
2602 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2603 .features[FEAT_8000_0001_EDX] =
2604 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2605 .features[FEAT_8000_0001_ECX] =
2606 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2607 .xlevel = 0x80000008,
2608 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2609 },
2610 {
2611 .name = "Opteron_G3",
2612 .level = 5,
2613 .vendor = CPUID_VENDOR_AMD,
2614 .family = 16,
2615 .model = 2,
2616 .stepping = 3,
2617 .features[FEAT_1_EDX] =
2618 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2619 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2620 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2621 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2622 CPUID_DE | CPUID_FP87,
2623 .features[FEAT_1_ECX] =
2624 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2625 CPUID_EXT_SSE3,
2626 .features[FEAT_8000_0001_EDX] =
2627 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
2628 CPUID_EXT2_RDTSCP,
2629 .features[FEAT_8000_0001_ECX] =
2630 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2631 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2632 .xlevel = 0x80000008,
2633 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2634 },
2635 {
2636 .name = "Opteron_G4",
2637 .level = 0xd,
2638 .vendor = CPUID_VENDOR_AMD,
2639 .family = 21,
2640 .model = 1,
2641 .stepping = 2,
2642 .features[FEAT_1_EDX] =
2643 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2644 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2645 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2646 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2647 CPUID_DE | CPUID_FP87,
2648 .features[FEAT_1_ECX] =
2649 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2650 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2651 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2652 CPUID_EXT_SSE3,
2653 .features[FEAT_8000_0001_EDX] =
2654 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2655 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2656 .features[FEAT_8000_0001_ECX] =
2657 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2658 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2659 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2660 CPUID_EXT3_LAHF_LM,
2661 .features[FEAT_SVM] =
2662 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2663 /* no xsaveopt! */
2664 .xlevel = 0x8000001A,
2665 .model_id = "AMD Opteron 62xx class CPU",
2666 },
2667 {
2668 .name = "Opteron_G5",
2669 .level = 0xd,
2670 .vendor = CPUID_VENDOR_AMD,
2671 .family = 21,
2672 .model = 2,
2673 .stepping = 0,
2674 .features[FEAT_1_EDX] =
2675 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2676 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2677 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2678 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2679 CPUID_DE | CPUID_FP87,
2680 .features[FEAT_1_ECX] =
2681 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2682 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2683 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2684 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2685 .features[FEAT_8000_0001_EDX] =
2686 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2687 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2688 .features[FEAT_8000_0001_ECX] =
2689 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2690 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2691 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2692 CPUID_EXT3_LAHF_LM,
2693 .features[FEAT_SVM] =
2694 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2695 /* no xsaveopt! */
2696 .xlevel = 0x8000001A,
2697 .model_id = "AMD Opteron 63xx class CPU",
2698 },
2699 {
2700 .name = "EPYC",
2701 .level = 0xd,
2702 .vendor = CPUID_VENDOR_AMD,
2703 .family = 23,
2704 .model = 1,
2705 .stepping = 2,
2706 .features[FEAT_1_EDX] =
2707 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2708 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2709 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2710 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2711 CPUID_VME | CPUID_FP87,
2712 .features[FEAT_1_ECX] =
2713 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2714 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2715 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2716 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2717 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2718 .features[FEAT_8000_0001_EDX] =
2719 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2720 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2721 CPUID_EXT2_SYSCALL,
2722 .features[FEAT_8000_0001_ECX] =
2723 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2724 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2725 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2726 CPUID_EXT3_TOPOEXT,
2727 .features[FEAT_7_0_EBX] =
2728 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2729 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2730 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2731 CPUID_7_0_EBX_SHA_NI,
2732 /* Missing: XSAVES (not supported by some Linux versions,
2733 * including v4.1 to v4.12).
2734 * KVM doesn't yet expose any XSAVES state save component.
2735 */
2736 .features[FEAT_XSAVE] =
2737 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2738 CPUID_XSAVE_XGETBV1,
2739 .features[FEAT_6_EAX] =
2740 CPUID_6_EAX_ARAT,
2741 .features[FEAT_SVM] =
2742 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2743 .xlevel = 0x8000001E,
2744 .model_id = "AMD EPYC Processor",
2745 .cache_info = &epyc_cache_info,
2746 .versions = (X86CPUVersionDefinition[]) {
2747 { .version = 1 },
2748 {
2749 .version = 2,
2750 .alias = "EPYC-IBPB",
2751 .props = (PropValue[]) {
2752 { "ibpb", "on" },
2753 { "model-id",
2754 "AMD EPYC Processor (with IBPB)" },
2755 { /* end of list */ }
2756 }
2757 },
2758 { /* end of list */ }
2759 }
2760 },
2761 {
2762 .name = "Dhyana",
2763 .level = 0xd,
2764 .vendor = CPUID_VENDOR_HYGON,
2765 .family = 24,
2766 .model = 0,
2767 .stepping = 1,
2768 .features[FEAT_1_EDX] =
2769 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2770 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2771 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2772 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2773 CPUID_VME | CPUID_FP87,
2774 .features[FEAT_1_ECX] =
2775 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2776 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
2777 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2778 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2779 CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
2780 .features[FEAT_8000_0001_EDX] =
2781 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2782 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2783 CPUID_EXT2_SYSCALL,
2784 .features[FEAT_8000_0001_ECX] =
2785 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2786 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2787 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2788 CPUID_EXT3_TOPOEXT,
2789 .features[FEAT_8000_0008_EBX] =
2790 CPUID_8000_0008_EBX_IBPB,
2791 .features[FEAT_7_0_EBX] =
2792 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2793 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2794 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
2795 /*
2796 * Missing: XSAVES (not supported by some Linux versions,
2797 * including v4.1 to v4.12).
2798 * KVM doesn't yet expose any XSAVES state save component.
2799 */
2800 .features[FEAT_XSAVE] =
2801 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2802 CPUID_XSAVE_XGETBV1,
2803 .features[FEAT_6_EAX] =
2804 CPUID_6_EAX_ARAT,
2805 .features[FEAT_SVM] =
2806 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2807 .xlevel = 0x8000001E,
2808 .model_id = "Hygon Dhyana Processor",
2809 .cache_info = &epyc_cache_info,
2810 },
2811 };
2812
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * Applied in array order by x86_cpu_apply_props(); individual entries
 * may be overridden at runtime via x86_cpu_change_kvm_default().
 * The {NULL, NULL} pair terminates the list.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
2829
/* TCG-specific defaults that override all CPU models when using TCG.
 * NULL-terminated, applied by x86_cpu_apply_props().
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};
2836
2837
2838 static X86CPUVersion x86_cpu_model_last_version(const X86CPUModel *model)
2839 {
2840 int v = 0;
2841 const X86CPUVersionDefinition *vdef =
2842 x86_cpu_def_get_versions(model->cpudef);
2843 while (vdef->version) {
2844 v = vdef->version;
2845 vdef++;
2846 }
2847 return v;
2848 }
2849
2850 /* Return the actual version being used for a specific CPU model */
2851 static X86CPUVersion x86_cpu_model_resolve_version(const X86CPUModel *model)
2852 {
2853 X86CPUVersion v = model->version;
2854 if (v == CPU_VERSION_LATEST) {
2855 return x86_cpu_model_last_version(model);
2856 }
2857 return v;
2858 }
2859
2860 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2861 {
2862 PropValue *pv;
2863 for (pv = kvm_default_props; pv->prop; pv++) {
2864 if (!strcmp(pv->prop, prop)) {
2865 pv->value = value;
2866 break;
2867 }
2868 }
2869
2870 /* It is valid to call this function only for properties that
2871 * are already present in the kvm_default_props table.
2872 */
2873 assert(pv->prop);
2874 }
2875
2876 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2877 bool migratable_only);
2878
2879 static bool lmce_supported(void)
2880 {
2881 uint64_t mce_cap = 0;
2882
2883 #ifdef CONFIG_KVM
2884 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2885 return false;
2886 }
2887 #endif
2888
2889 return !!(mce_cap & MCG_LMCE_P);
2890 }
2891
2892 #define CPUID_MODEL_ID_SZ 48
2893
/**
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string
 * automatically.
 */
static int cpu_x86_fill_model_id(char *str)
{
    int i;

    /* CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the
     * brand string packed into EAX/EBX/ECX/EDX, in that order.
     */
    for (i = 0; i < 3; i++) {
        uint32_t regs[4] = { 0, 0, 0, 0 };

        host_cpuid(0x80000002 + i, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(str + i * 16, regs, sizeof(regs));
    }
    return 0;
}
2917
/* QOM properties specific to the "max" CPU model.
 * "migratable" restricts enabled features to migration-safe ones;
 * "host-cache-info" passes the host cache topology through to the guest.
 */
static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
2923
2924 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2925 {
2926 DeviceClass *dc = DEVICE_CLASS(oc);
2927 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2928
2929 xcc->ordering = 9;
2930
2931 xcc->model_description =
2932 "Enables all features supported by the accelerator in the current host";
2933
2934 dc->props = max_x86_cpu_properties;
2935 }
2936
/* Instance initializer for the "max" CPU model.
 * Seeds vendor/family/model/stepping/model-id either from the host CPU
 * (KVM/HVF) or from fixed TCG defaults; feature words are filled in
 * later once "migratable" is known.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;

        host_vendor_fms(vendor, &family, &model, &stepping);
        cpu_x86_fill_model_id(model_id);

        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        /* accel_uses_host_cpuid() implies KVM or HVF, so the else branch
         * below is the HVF path.
         */
        if (kvm_enabled()) {
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG (or no host CPUID): advertise a fixed synthetic identity */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
2996
/* QOM type registration for the "max" CPU model */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
3003
3004 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
/* Class initializer for the "host" CPU model: same as "max" but it
 * refuses to run without host CPUID (KVM/HVF) and sorts just before
 * "max" in listings.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->host_cpuid_required = true;
    xcc->ordering = 8;

#if defined(CONFIG_KVM)
    xcc->model_description =
        "KVM processor with all supported host features ";
#elif defined(CONFIG_HVF)
    xcc->model_description =
        "HVF processor with all supported host features ";
#endif
}
3020
/* "host" is a subtype of "max"; it only overrides the class, reusing
 * max_x86_cpu_initfn for instance setup.
 */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
3026
3027 #endif
3028
3029 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
3030 {
3031 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
3032
3033 switch (f->type) {
3034 case CPUID_FEATURE_WORD:
3035 {
3036 const char *reg = get_register_name_32(f->cpuid.reg);
3037 assert(reg);
3038 return g_strdup_printf("CPUID.%02XH:%s",
3039 f->cpuid.eax, reg);
3040 }
3041 case MSR_FEATURE_WORD:
3042 return g_strdup_printf("MSR(%02XH)",
3043 f->msr.index);
3044 }
3045
3046 return NULL;
3047 }
3048
3049 static void report_unavailable_features(FeatureWord w, uint32_t mask)
3050 {
3051 FeatureWordInfo *f = &feature_word_info[w];
3052 int i;
3053 char *feat_word_str;
3054
3055 for (i = 0; i < 32; ++i) {
3056 if ((1UL << i) & mask) {
3057 feat_word_str = feature_word_description(f, i);
3058 warn_report("%s doesn't support requested feature: %s%s%s [bit %d]",
3059 accel_uses_host_cpuid() ? "host" : "TCG",
3060 feat_word_str,
3061 f->feat_names[i] ? "." : "",
3062 f->feat_names[i] ? f->feat_names[i] : "", i);
3063 g_free(feat_word_str);
3064 }
3065 }
3066 }
3067
3068 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
3069 const char *name, void *opaque,
3070 Error **errp)
3071 {
3072 X86CPU *cpu = X86_CPU(obj);
3073 CPUX86State *env = &cpu->env;
3074 int64_t value;
3075
3076 value = (env->cpuid_version >> 8) & 0xf;
3077 if (value == 0xf) {
3078 value += (env->cpuid_version >> 20) & 0xff;
3079 }
3080 visit_type_int(v, name, &value, errp);
3081 }
3082
3083 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
3084 const char *name, void *opaque,
3085 Error **errp)
3086 {
3087 X86CPU *cpu = X86_CPU(obj);
3088 CPUX86State *env = &cpu->env;
3089 const int64_t min = 0;
3090 const int64_t max = 0xff + 0xf;
3091 Error *local_err = NULL;
3092 int64_t value;
3093
3094 visit_type_int(v, name, &value, &local_err);
3095 if (local_err) {
3096 error_propagate(errp, local_err);
3097 return;
3098 }
3099 if (value < min || value > max) {
3100 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3101 name ? name : "null", value, min, max);
3102 return;
3103 }
3104
3105 env->cpuid_version &= ~0xff00f00;
3106 if (value > 0x0f) {
3107 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
3108 } else {
3109 env->cpuid_version |= value << 8;
3110 }
3111 }
3112
3113 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
3114 const char *name, void *opaque,
3115 Error **errp)
3116 {
3117 X86CPU *cpu = X86_CPU(obj);
3118 CPUX86State *env = &cpu->env;
3119 int64_t value;
3120
3121 value = (env->cpuid_version >> 4) & 0xf;
3122 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
3123 visit_type_int(v, name, &value, errp);
3124 }
3125
3126 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
3127 const char *name, void *opaque,
3128 Error **errp)
3129 {
3130 X86CPU *cpu = X86_CPU(obj);
3131 CPUX86State *env = &cpu->env;
3132 const int64_t min = 0;
3133 const int64_t max = 0xff;
3134 Error *local_err = NULL;
3135 int64_t value;
3136
3137 visit_type_int(v, name, &value, &local_err);
3138 if (local_err) {
3139 error_propagate(errp, local_err);
3140 return;
3141 }
3142 if (value < min || value > max) {
3143 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3144 name ? name : "null", value, min, max);
3145 return;
3146 }
3147
3148 env->cpuid_version &= ~0xf00f0;
3149 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
3150 }
3151
3152 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
3153 const char *name, void *opaque,
3154 Error **errp)
3155 {
3156 X86CPU *cpu = X86_CPU(obj);
3157 CPUX86State *env = &cpu->env;
3158 int64_t value;
3159
3160 value = env->cpuid_version & 0xf;
3161 visit_type_int(v, name, &value, errp);
3162 }
3163
3164 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
3165 const char *name, void *opaque,
3166 Error **errp)
3167 {
3168 X86CPU *cpu = X86_CPU(obj);
3169 CPUX86State *env = &cpu->env;
3170 const int64_t min = 0;
3171 const int64_t max = 0xf;
3172 Error *local_err = NULL;
3173 int64_t value;
3174
3175 visit_type_int(v, name, &value, &local_err);
3176 if (local_err) {
3177 error_propagate(errp, local_err);
3178 return;
3179 }
3180 if (value < min || value > max) {
3181 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3182 name ? name : "null", value, min, max);
3183 return;
3184 }
3185
3186 env->cpuid_version &= ~0xf;
3187 env->cpuid_version |= value & 0xf;
3188 }
3189
3190 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
3191 {
3192 X86CPU *cpu = X86_CPU(obj);
3193 CPUX86State *env = &cpu->env;
3194 char *value;
3195
3196 value = g_malloc(CPUID_VENDOR_SZ + 1);
3197 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
3198 env->cpuid_vendor3);
3199 return value;
3200 }
3201
3202 static void x86_cpuid_set_vendor(Object *obj, const char *value,
3203 Error **errp)
3204 {
3205 X86CPU *cpu = X86_CPU(obj);
3206 CPUX86State *env = &cpu->env;
3207 int i;
3208
3209 if (strlen(value) != CPUID_VENDOR_SZ) {
3210 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
3211 return;
3212 }
3213
3214 env->cpuid_vendor1 = 0;
3215 env->cpuid_vendor2 = 0;
3216 env->cpuid_vendor3 = 0;
3217 for (i = 0; i < 4; i++) {
3218 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3219 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3220 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3221 }
3222 }
3223
3224 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3225 {
3226 X86CPU *cpu = X86_CPU(obj);
3227 CPUX86State *env = &cpu->env;
3228 char *value;
3229 int i;
3230
3231 value = g_malloc(48 + 1);
3232 for (i = 0; i < 48; i++) {
3233 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3234 }
3235 value[48] = '\0';
3236 return value;
3237 }
3238
3239 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3240 Error **errp)
3241 {
3242 X86CPU *cpu = X86_CPU(obj);
3243 CPUX86State *env = &cpu->env;
3244 int c, len, i;
3245
3246 if (model_id == NULL) {
3247 model_id = "";
3248 }
3249 len = strlen(model_id);
3250 memset(env->cpuid_model, 0, 48);
3251 for (i = 0; i < 48; i++) {
3252 if (i >= len) {
3253 c = '\0';
3254 } else {
3255 c = (uint8_t)model_id[i];
3256 }
3257 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3258 }
3259 }
3260
3261 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3262 void *opaque, Error **errp)
3263 {
3264 X86CPU *cpu = X86_CPU(obj);
3265 int64_t value;
3266
3267 value = cpu->env.tsc_khz * 1000;
3268 visit_type_int(v, name, &value, errp);
3269 }
3270
3271 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3272 void *opaque, Error **errp)
3273 {
3274 X86CPU *cpu = X86_CPU(obj);
3275 const int64_t min = 0;
3276 const int64_t max = INT64_MAX;
3277 Error *local_err = NULL;
3278 int64_t value;
3279
3280 visit_type_int(v, name, &value, &local_err);
3281 if (local_err) {
3282 error_propagate(errp, local_err);
3283 return;
3284 }
3285 if (value < min || value > max) {
3286 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3287 name ? name : "null", value, min, max);
3288 return;
3289 }
3290
3291 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
3292 }
3293
/* Generic getter for "feature-words" and "filtered-features" properties.
 * @opaque points at the FeatureWordArray to report (env->features or
 * cpu->filtered_features). The list nodes live on the stack: they are
 * only consumed by the visitor call at the bottom of this function and
 * must not escape it.
 */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /*
         * We didn't have MSR features when "feature-words" was
         * introduced. Therefore skipped other type entries.
         */
        if (wi->type != CPUID_FEATURE_WORD) {
            continue;
        }
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid.eax;
        qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid.ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
3329
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 * Modifies @s in place.
 */
static inline void feat2prop(char *s)
{
    for (char *p = strchr(s, '_'); p; p = strchr(p, '_')) {
        *p = '-';
    }
}
3339
/* Return the feature property name for a feature flag bit, or NULL when
 * the bit has no name in feature_word_info.
 */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        /* map the HI word's bits to save-component indexes 32..63 */
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            /* redirect to the feature word/bit that enables this area */
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}
3360
/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the later is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled).
 * Populated by x86_cpu_parse_featurestr().
 */
static GList *plus_features, *minus_features;
3367
/* GCompareFunc wrapper for feature-name lookup in plus/minus_features.
 * g_strcmp0 is documented to tolerate NULL arguments (glib).
 */
static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}
3372
/* Parse "+feature,-feature,feature=foo" CPU feature string.
 *
 * Registers one qdev global property per "feature=value" pair against
 * @typename; "+feat"/"-feat" entries only populate the legacy
 * plus_features/minus_features lists (applied elsewhere). Runs at most
 * once per process (guarded by cpu_globals_initialized). @features is
 * modified in place by strtok().
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* split "name=value"; a bare name means "on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* warn when the same feature appears as both +/-feat and feat= */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" accepts metric suffixes (e.g. 2G)
         * and maps to the "tsc-frequency" property in Hz. */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}
3462
3463 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3464 static int x86_cpu_filter_features(X86CPU *cpu);
3465
3466 /* Build a list with the name of all features on a feature word array */
3467 static void x86_cpu_list_feature_names(FeatureWordArray features,
3468 strList **feat_names)
3469 {
3470 FeatureWord w;
3471 strList **next = feat_names;
3472
3473 for (w = 0; w < FEATURE_WORDS; w++) {
3474 uint32_t filtered = features[w];
3475 int i;
3476 for (i = 0; i < 32; i++) {
3477 if (filtered & (1UL << i)) {
3478 strList *new = g_new0(strList, 1);
3479 new->value = g_strdup(x86_cpu_feature_name(w, i));
3480 *next = new;
3481 next = &new->next;
3482 }
3483 }
3484 }
3485 }
3486
3487 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
3488 const char *name, void *opaque,
3489 Error **errp)
3490 {
3491 X86CPU *xc = X86_CPU(obj);
3492 strList *result = NULL;
3493
3494 x86_cpu_list_feature_names(xc->filtered_features, &result);
3495 visit_type_strList(v, "unavailable-features", &result, errp);
3496 }
3497
/* Check for missing features that may prevent the CPU class from
 * running using the current machine and accelerator.
 *
 * Appends feature names to *missing_feats. A model needing host CPUID
 * without KVM/HVF reports just "kvm"; otherwise a throwaway CPU object
 * is instantiated to expand and filter its features.
 */
static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
                                                 strList **missing_feats)
{
    X86CPU *xc;
    Error *err = NULL;
    strList **next = missing_feats;

    if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("kvm");
        *missing_feats = new;
        return;
    }

    /* temporary instance, unref'd below — never realized */
    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));

    x86_cpu_expand_features(xc, &err);
    if (err) {
        /* Errors at x86_cpu_expand_features should never happen,
         * but in case it does, just report the model as not
         * runnable at all using the "type" property.
         */
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("type");
        *next = new;
        next = &new->next;
    }

    x86_cpu_filter_features(xc);

    x86_cpu_list_feature_names(xc->filtered_features, next);

    object_unref(OBJECT(xc));
}
3535
/* Print all cpuid feature names in featureset, wrapping the output at
 * roughly 75 columns.
 */
static void listflags(GList *features)
{
    size_t len = 0;
    GList *tmp;

    for (tmp = features; tmp; tmp = tmp->next) {
        const char *name = tmp->data;
        if ((len + strlen(name) + 1) >= 75) {
            qemu_printf("\n");
            len = 0;
        }
        /* NOTE(review): both ternary branches are identical here — this
         * looks like a line-start indent whose extra space was lost in
         * transit; verify against upstream before changing. */
        qemu_printf("%s%s", len == 0 ? " " : " ", name);
        len += strlen(name) + 1;
    }
    qemu_printf("\n");
}
3554
3555 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3556 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3557 {
3558 ObjectClass *class_a = (ObjectClass *)a;
3559 ObjectClass *class_b = (ObjectClass *)b;
3560 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3561 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3562 char *name_a, *name_b;
3563 int ret;
3564
3565 if (cc_a->ordering != cc_b->ordering) {
3566 ret = cc_a->ordering - cc_b->ordering;
3567 } else {
3568 name_a = x86_cpu_class_get_model_name(cc_a);
3569 name_b = x86_cpu_class_get_model_name(cc_b);
3570 ret = strcmp(name_a, name_b);
3571 g_free(name_a);
3572 g_free(name_b);
3573 }
3574 return ret;
3575 }
3576
3577 static GSList *get_sorted_cpu_model_list(void)
3578 {
3579 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3580 list = g_slist_sort(list, x86_cpu_list_compare);
3581 return list;
3582 }
3583
3584 static char *x86_cpu_class_get_model_id(X86CPUClass *xc)
3585 {
3586 Object *obj = object_new(object_class_get_name(OBJECT_CLASS(xc)));
3587 char *r = object_property_get_str(obj, "model-id", &error_abort);
3588 object_unref(obj);
3589 return r;
3590 }
3591
3592 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3593 {
3594 ObjectClass *oc = data;
3595 X86CPUClass *cc = X86_CPU_CLASS(oc);
3596 char *name = x86_cpu_class_get_model_name(cc);
3597 char *desc = g_strdup(cc->model_description);
3598
3599 if (!desc) {
3600 desc = x86_cpu_class_get_model_id(cc);
3601 }
3602
3603 qemu_printf("x86 %-20s %-48s\n", name, desc);
3604 g_free(name);
3605 g_free(desc);
3606 }
3607
3608 /* list available CPU models and flags */
3609 void x86_cpu_list(void)
3610 {
3611 int i, j;
3612 GSList *list;
3613 GList *names = NULL;
3614
3615 qemu_printf("Available CPUs:\n");
3616 list = get_sorted_cpu_model_list();
3617 g_slist_foreach(list, x86_cpu_list_entry, NULL);
3618 g_slist_free(list);
3619
3620 names = NULL;
3621 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3622 FeatureWordInfo *fw = &feature_word_info[i];
3623 for (j = 0; j < 32; j++) {
3624 if (fw->feat_names[j]) {
3625 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3626 }
3627 }
3628 }
3629
3630 names = g_list_sort(names, (GCompareFunc)strcmp);
3631
3632 qemu_printf("\nRecognized CPUID flags:\n");
3633 listflags(names);
3634 qemu_printf("\n");
3635 g_list_free(names);
3636 }
3637
3638 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3639 {
3640 ObjectClass *oc = data;
3641 X86CPUClass *cc = X86_CPU_CLASS(oc);
3642 CpuDefinitionInfoList **cpu_list = user_data;
3643 CpuDefinitionInfoList *entry;
3644 CpuDefinitionInfo *info;
3645
3646 info = g_malloc0(sizeof(*info));
3647 info->name = x86_cpu_class_get_model_name(cc);
3648 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3649 info->has_unavailable_features = true;
3650 info->q_typename = g_strdup(object_class_get_name(oc));
3651 info->migration_safe = cc->migration_safe;
3652 info->has_migration_safe = true;
3653 info->q_static = cc->static_model;
3654
3655 entry = g_malloc0(sizeof(*entry));
3656 entry->value = info;
3657 entry->next = *cpu_list;
3658 *cpu_list = entry;
3659 }
3660
3661 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
3662 {
3663 CpuDefinitionInfoList *cpu_list = NULL;
3664 GSList *list = get_sorted_cpu_model_list();
3665 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3666 g_slist_free(list);
3667 return cpu_list;
3668 }
3669
/* Return the bits of feature word @w that the current accelerator can
 * actually provide; with no accelerator (e.g. qtest) everything (~0) is
 * reported as supported. When @migratable_only is set, bits that cannot
 * be migrated are masked off.
 */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;

    if (kvm_enabled()) {
        switch (wi->type) {
        case CPUID_FEATURE_WORD:
            r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
                                                        wi->cpuid.ecx,
                                                        wi->cpuid.reg);
            break;
        case MSR_FEATURE_WORD:
            r = kvm_arch_get_supported_msr_feature(kvm_state,
                        wi->msr.index);
            break;
        }
    } else if (hvf_enabled()) {
        /* HVF has no MSR feature words */
        if (wi->type != CPUID_FEATURE_WORD) {
            return 0;
        }
        r = hvf_get_supported_cpuid(wi->cpuid.eax,
                                    wi->cpuid.ecx,
                                    wi->cpuid.reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
3705
3706 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3707 {
3708 FeatureWord w;
3709
3710 for (w = 0; w < FEATURE_WORDS; w++) {
3711 report_unavailable_features(w, cpu->filtered_features[w]);
3712 }
3713 }
3714
3715 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3716 {
3717 PropValue *pv;
3718 for (pv = props; pv->prop; pv++) {
3719 if (!pv->value) {
3720 continue;
3721 }
3722 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3723 &error_abort);
3724 }
3725 }
3726
/* Apply properties for the CPU model version specified in model.
 * Versions are cumulative: the props of every version up to and
 * including the resolved one are applied in order. CPU_VERSION_LEGACY
 * applies nothing.
 */
static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model)
{
    const X86CPUVersionDefinition *vdef;
    X86CPUVersion version = x86_cpu_model_resolve_version(model);

    if (version == CPU_VERSION_LEGACY) {
        return;
    }

    for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) {
        PropValue *p;

        /* vdef->props may be NULL for versions with no property changes */
        for (p = vdef->props; p && p->prop; p++) {
            object_property_parse(OBJECT(cpu), p->value, p->prop,
                                  &error_abort);
        }

        if (vdef->version == version) {
            break;
        }
    }

    /*
     * If we reached the end of the list, version number was invalid
     */
    assert(vdef->version == version);
}
3755
/* Load data from X86CPUDefinition into a X86CPU object.
 * Sets identity (family/model/stepping/vendor/model-id), minimum CPUID
 * levels and all feature words, then layers accelerator defaults and
 * version-specific property overrides on top.
 */
static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model, Error **errp)
{
    X86CPUDefinition *def = model->cpudef;
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        /* x2apic needs the in-kernel irqchip to be usable */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

    x86_cpu_apply_version_props(cpu, model);
}
3819
3820 #ifndef CONFIG_USER_ONLY
/* Return a QDict containing keys for all properties that can be included
 * in static expansion of CPU models. All properties set by x86_cpu_load_model()
 * must be included in the dictionary.
 *
 * The dict is built once and cached in a function-local static; it is
 * never freed.
 */
static QDict *x86_cpu_static_props(void)
{
    FeatureWord w;
    int i;
    static const char *props[] = {
        "min-level",
        "min-xlevel",
        "family",
        "model",
        "stepping",
        "model-id",
        "vendor",
        "lmce",
        NULL,
    };
    static QDict *d;

    /* return the cached dict on every call after the first */
    if (d) {
        return d;
    }

    d = qdict_new();
    for (i = 0; props[i]; i++) {
        qdict_put_null(d, props[i]);
    }

    /* every named feature flag is also a static property */
    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *fi = &feature_word_info[w];
        int bit;
        for (bit = 0; bit < 32; bit++) {
            if (!fi->feat_names[bit]) {
                continue;
            }
            qdict_put_null(d, fi->feat_names[bit]);
        }
    }

    return d;
}
3864
3865 /* Add an entry to @props dict, with the value for property. */
3866 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3867 {
3868 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3869 &error_abort);
3870
3871 qdict_put_obj(props, prop, value);
3872 }
3873
3874 /* Convert CPU model data from X86CPU object to a property dictionary
3875 * that can recreate exactly the same CPU model.
3876 */
3877 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3878 {
3879 QDict *sprops = x86_cpu_static_props();
3880 const QDictEntry *e;
3881
3882 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3883 const char *prop = qdict_entry_key(e);
3884 x86_cpu_expand_prop(cpu, props, prop);
3885 }
3886 }
3887
3888 /* Convert CPU model data from X86CPU object to a property dictionary
3889 * that can recreate exactly the same CPU model, including every
3890 * writeable QOM property.
3891 */
3892 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3893 {
3894 ObjectPropertyIterator iter;
3895 ObjectProperty *prop;
3896
3897 object_property_iter_init(&iter, OBJECT(cpu));
3898 while ((prop = object_property_iter_next(&iter))) {
3899 /* skip read-only or write-only properties */
3900 if (!prop->get || !prop->set) {
3901 continue;
3902 }
3903
3904 /* "hotplugged" is the only property that is configurable
3905 * on the command-line but will be set differently on CPUs
3906 * created using "-cpu ... -smp ..." and by CPUs created
3907 * on the fly by x86_cpu_from_model() for querying. Skip it.
3908 */
3909 if (!strcmp(prop->name, "hotplugged")) {
3910 continue;
3911 }
3912 x86_cpu_expand_prop(cpu, props, prop->name);
3913 }
3914 }
3915
3916 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3917 {
3918 const QDictEntry *prop;
3919 Error *err = NULL;
3920
3921 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3922 object_property_set_qobject(obj, qdict_entry_value(prop),
3923 qdict_entry_key(prop), &err);
3924 if (err) {
3925 break;
3926 }
3927 }
3928
3929 error_propagate(errp, err);
3930 }
3931
3932 /* Create X86CPU object according to model+props specification */
3933 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3934 {
3935 X86CPU *xc = NULL;
3936 X86CPUClass *xcc;
3937 Error *err = NULL;
3938
3939 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3940 if (xcc == NULL) {
3941 error_setg(&err, "CPU model '%s' not found", model);
3942 goto out;
3943 }
3944
3945 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3946 if (props) {
3947 object_apply_props(OBJECT(xc), props, &err);
3948 if (err) {
3949 goto out;
3950 }
3951 }
3952
3953 x86_cpu_expand_features(xc, &err);
3954 if (err) {
3955 goto out;
3956 }
3957
3958 out:
3959 if (err) {
3960 error_propagate(errp, err);
3961 object_unref(OBJECT(xc));
3962 xc = NULL;
3963 }
3964 return xc;
3965 }
3966
3967 CpuModelExpansionInfo *
3968 qmp_query_cpu_model_expansion(CpuModelExpansionType type,
3969 CpuModelInfo *model,
3970 Error **errp)
3971 {
3972 X86CPU *xc = NULL;
3973 Error *err = NULL;
3974 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3975 QDict *props = NULL;
3976 const char *base_name;
3977
3978 xc = x86_cpu_from_model(model->name,
3979 model->has_props ?
3980 qobject_to(QDict, model->props) :
3981 NULL, &err);
3982 if (err) {
3983 goto out;
3984 }
3985
3986 props = qdict_new();
3987 ret->model = g_new0(CpuModelInfo, 1);
3988 ret->model->props = QOBJECT(props);
3989 ret->model->has_props = true;
3990
3991 switch (type) {
3992 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3993 /* Static expansion will be based on "base" only */
3994 base_name = "base";
3995 x86_cpu_to_dict(xc, props);
3996 break;
3997 case CPU_MODEL_EXPANSION_TYPE_FULL:
3998 /* As we don't return every single property, full expansion needs
3999 * to keep the original model name+props, and add extra
4000 * properties on top of that.
4001 */
4002 base_name = model->name;
4003 x86_cpu_to_dict_full(xc, props);
4004 break;
4005 default:
4006 error_setg(&err, "Unsupported expansion type");
4007 goto out;
4008 }
4009
4010 x86_cpu_to_dict(xc, props);
4011
4012 ret->model->name = g_strdup(base_name);
4013
4014 out:
4015 object_unref(OBJECT(xc));
4016 if (err) {
4017 error_propagate(errp, err);
4018 qapi_free_CpuModelExpansionInfo(ret);
4019 ret = NULL;
4020 }
4021 return ret;
4022 }
4023 #endif /* !CONFIG_USER_ONLY */
4024
4025 static gchar *x86_gdb_arch_name(CPUState *cs)
4026 {
4027 #ifdef TARGET_X86_64
4028 return g_strdup("i386:x86-64");
4029 #else
4030 return g_strdup("i386");
4031 #endif
4032 }
4033
4034 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
4035 {
4036 X86CPUModel *model = data;
4037 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4038
4039 xcc->model = model;
4040 xcc->migration_safe = true;
4041 }
4042
4043 static void x86_register_cpu_model_type(const char *name, X86CPUModel *model)
4044 {
4045 char *typename = x86_cpu_type_name(name);
4046 TypeInfo ti = {
4047 .name = typename,
4048 .parent = TYPE_X86_CPU,
4049 .class_init = x86_cpu_cpudef_class_init,
4050 .class_data = model,
4051 };
4052
4053 type_register(&ti);
4054 g_free(typename);
4055 }
4056
4057 static void x86_register_cpudef_types(X86CPUDefinition *def)
4058 {
4059 X86CPUModel *m;
4060 const X86CPUVersionDefinition *vdef;
4061 char *name;
4062
4063 /* AMD aliases are handled at runtime based on CPUID vendor, so
4064 * they shouldn't be set on the CPU model table.
4065 */
4066 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
4067 /* catch mistakes instead of silently truncating model_id when too long */
4068 assert(def->model_id && strlen(def->model_id) <= 48);
4069
4070 /* Unversioned model: */
4071 m = g_new0(X86CPUModel, 1);
4072 m->cpudef = def;
4073 m->version = CPU_VERSION_LEGACY;
4074 x86_register_cpu_model_type(def->name, m);
4075
4076 /* Versioned models: */
4077
4078 for (vdef = x86_cpu_def_get_versions(def); vdef->version; vdef++) {
4079 X86CPUModel *m = g_new0(X86CPUModel, 1);
4080 m->cpudef = def;
4081 m->version = vdef->version;
4082 name = x86_cpu_versioned_model_name(def, vdef->version);
4083 x86_register_cpu_model_type(name, m);
4084 g_free(name);
4085
4086 if (vdef->alias) {
4087 X86CPUModel *am = g_new0(X86CPUModel, 1);
4088 am->cpudef = def;
4089 am->version = vdef->version;
4090 x86_register_cpu_model_type(vdef->alias, am);
4091 }
4092 }
4093
4094 }
4095
4096 #if !defined(CONFIG_USER_ONLY)
4097
/* Strip the APIC feature bit from CPUID leaf 1 EDX, e.g. when the board
 * provides no local APIC for this CPU.
 */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
4102
4103 #endif /* !CONFIG_USER_ONLY */
4104
/* Compute the guest-visible CPUID result for leaf @index, subleaf @count.
 *
 * The four output registers are written through @eax/@ebx/@ecx/@edx,
 * derived from the configured feature words, cache-info and topology
 * state in @env. Out-of-range leaves are clamped per Intel SDM behavior.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t die_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Highest basic leaf + vendor string (GenuineIntel etc.) */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, APIC ID, and basic feature flags */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE mirrors CR4.OSXSAVE at read time, not a static flag */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) <<  8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26.  */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                die_offset = apicid_die_offset(env->nr_dies,
                                        cs->nr_cores, cs->nr_threads);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << die_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* MONITOR/MWAIT Leaf */
        *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
        *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
        *ecx = cpu->mwait.ecx; /* flags */
        *edx = cpu->mwait.edx; /* mwait substates */
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors CR4.PKE at read time, like OSXSAVE above */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            /* No PMU exposed without an accelerator backing it */
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(env->nr_dies,
                                      cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(env->nr_dies,
                                     cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0x1F:
        /* V2 Extended Topology Enumeration Leaf */
        if (env->nr_dies < 2) {
            /* Leaf only exists on multi-die configurations */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;
        switch (count) {
        case 0:
            *eax = apicid_core_offset(env->nr_dies, cs->nr_cores,
                                                    cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_die_offset(env->nr_dies, cs->nr_cores,
                                                   cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        case 2:
            *eax = apicid_pkg_offset(env->nr_dies, cs->nr_cores,
                                                   cs->nr_threads);
            *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }
        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = xsave_area_size(env->xcr0);
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset sub-leaves */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Highest extended leaf + vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string: 3 leaves of 16 bytes each */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
             *eax = cpu->phys_bits; /* configurable physical bits */
             if  (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                 *eax |= 0x00003900; /* 57 bits virtual */
             } else {
                 *eax |= 0x00003000; /* 48 bits virtual */
             }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            /* NC field: number of physical threads minus 1 */
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        /* AMD cache topology (analogous to Intel leaf 4) */
        *eax = 0;
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            break;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        /* AMD extended topology: APIC/core/node IDs */
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(cs, cpu,
                                  eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        /* AMD Secure Encrypted Virtualization (SEV) info */
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
4587
/* CPUClass::reset()
 *
 * Bring the vCPU to the architectural power-on/RESET state: real mode,
 * CS:IP = F000:FFF0, FPU/SSE/XSAVE units in INIT state, MTRRs cleared,
 * pending exceptions discarded. APs are additionally halted until the
 * BSP starts them (system emulation only).
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Fields after end_reset_fields (e.g. configured features) survive reset */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 reset value: ET/CD/NW set, paging and protection off */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 so CS:IP = F000:FFF0 points at the reset vector */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    /* EDX holds family/model/stepping after reset */
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
    if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
        env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
    }

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    /* Discard any pending interrupt/exception delivery state */
    env->interrupt_injected = -1;
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}
4723
4724 #ifndef CONFIG_USER_ONLY
/* Return true if this CPU is the bootstrap processor, as reported by
 * the BSP bit of its APIC base MSR.
 */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
4729
/* Machine-reset callback: forward a machine reset to this CPU.
 * TODO: remove me, when reset over QOM tree is implemented
 */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
4736 #endif
4737
4738 static void mce_init(X86CPU *cpu)
4739 {
4740 CPUX86State *cenv = &cpu->env;
4741 unsigned int bank;
4742
4743 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4744 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4745 (CPUID_MCE | CPUID_MCA)) {
4746 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4747 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4748 cenv->mcg_ctl = ~(uint64_t)0;
4749 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4750 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4751 }
4752 }
4753 }
4754
4755 #ifndef CONFIG_USER_ONLY
4756 APICCommonClass *apic_get_class(void)
4757 {
4758 const char *apic_type = "apic";
4759
4760 /* TODO: in-kernel irqchip for hvf */
4761 if (kvm_apic_in_kernel()) {
4762 apic_type = "kvm-apic";
4763 } else if (xen_enabled()) {
4764 apic_type = "xen-apic";
4765 }
4766
4767 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4768 }
4769
/* Create the local APIC device for @cpu and attach it as the "lapic"
 * QOM child, leaving realization to x86_cpu_apic_realize().
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* drop our reference; the child property now owns the APIC */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
4787
/* Realize the CPU's APIC device (if any) and map the APIC MMIO window.
 * The MMIO region is shared, so it is mapped only once system-wide.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
     }
}
4810
/* machine-done notifier: if the machine exposes a /machine/smram region,
 * alias it into this CPU's address space so SMM code can access it.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        /* alias covers the full 4GiB low-memory window */
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
4825 #else
/* No APIC in user-mode emulation: realizing it is a no-op. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
4829 #endif
4830
/* Query the host's physical address width via CPUID.
 * Note: Only safe for use on x86(-64) hosts.
 */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax < 0x80000008) {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        return 36;
    }

    host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
    /* Note: According to AMD doc 25481 rev 2.34 they have a field
     * at 23:16 that can specify a maximum physical address bits for
     * the guest that can override this value; but I've not seen
     * anything with that set.
     */
    return eax & 0xff;
}
4856
4857 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4858 {
4859 if (*min < value) {
4860 *min = value;
4861 }
4862 }
4863
4864 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4865 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4866 {
4867 CPUX86State *env = &cpu->env;
4868 FeatureWordInfo *fi = &feature_word_info[w];
4869 uint32_t eax = fi->cpuid.eax;
4870 uint32_t region = eax & 0xF0000000;
4871
4872 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
4873 if (!env->features[w]) {
4874 return;
4875 }
4876
4877 switch (region) {
4878 case 0x00000000:
4879 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4880 break;
4881 case 0x80000000:
4882 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4883 break;
4884 case 0xC0000000:
4885 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4886 break;
4887 }
4888 }
4889
4890 /* Calculate XSAVE components based on the configured CPU feature flags */
4891 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4892 {
4893 CPUX86State *env = &cpu->env;
4894 int i;
4895 uint64_t mask;
4896
4897 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4898 return;
4899 }
4900
4901 mask = 0;
4902 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4903 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4904 if (env->features[esa->feature] & esa->bits) {
4905 mask |= (1ULL << i);
4906 }
4907 }
4908
4909 env->features[FEAT_XSAVE_COMP_LO] = mask;
4910 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4911 }
4912
4913 /***** Steps involved on loading and filtering CPUID data
4914 *
4915 * When initializing and realizing a CPU object, the steps
4916 * involved in setting up CPUID data are:
4917 *
4918 * 1) Loading CPU model definition (X86CPUDefinition). This is
4919 * implemented by x86_cpu_load_model() and should be completely
4920 * transparent, as it is done automatically by instance_init.
4921 * No code should need to look at X86CPUDefinition structs
4922 * outside instance_init.
4923 *
4924 * 2) CPU expansion. This is done by realize before CPUID
4925 * filtering, and will make sure host/accelerator data is
4926 * loaded for CPU models that depend on host capabilities
4927 * (e.g. "host"). Done by x86_cpu_expand_features().
4928 *
4929 * 3) CPUID filtering. This initializes extra data related to
4930 * CPUID, and checks if the host supports all capabilities
4931 * required by the CPU. Runnability of a CPU model is
4932 * determined at this step. Done by x86_cpu_filter_features().
4933 *
4934 * Some operations don't require all steps to be performed.
4935 * More precisely:
4936 *
4937 * - CPU instance creation (instance_init) will run only CPU
4938 * model loading. CPU expansion can't run at instance_init-time
4939 * because host/accelerator data may be not available yet.
4940 * - CPU realization will perform both CPU model expansion and CPUID
4941 * filtering, and return an error in case one of them fails.
4942 * - query-cpu-definitions needs to run all 3 steps. It needs
4943 * to run CPUID filtering, as the 'unavailable-features'
4944 * field is set based on the filtering results.
4945 * - The query-cpu-model-expansion QMP command only needs to run
4946 * CPU model loading and CPU expansion. It should not filter
4947 * any CPUID data based on host capabilities.
4948 */
4949
4950 /* Expand CPU configuration data, based on configured features
4951 * and host/accelerator capabilities when appropriate.
4952 */
4953 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4954 {
4955 CPUX86State *env = &cpu->env;
4956 FeatureWord w;
4957 GList *l;
4958 Error *local_err = NULL;
4959
4960 /*TODO: Now cpu->max_features doesn't overwrite features
4961 * set using QOM properties, and we can convert
4962 * plus_features & minus_features to global properties
4963 * inside x86_cpu_parse_featurestr() too.
4964 */
4965 if (cpu->max_features) {
4966 for (w = 0; w < FEATURE_WORDS; w++) {
4967 /* Override only features that weren't set explicitly
4968 * by the user.
4969 */
4970 env->features[w] |=
4971 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4972 ~env->user_features[w] & \
4973 ~feature_word_info[w].no_autoenable_flags;
4974 }
4975 }
4976
4977 for (l = plus_features; l; l = l->next) {
4978 const char *prop = l->data;
4979 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4980 if (local_err) {
4981 goto out;
4982 }
4983 }
4984
4985 for (l = minus_features; l; l = l->next) {
4986 const char *prop = l->data;
4987 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4988 if (local_err) {
4989 goto out;
4990 }
4991 }
4992
4993 if (!kvm_enabled() || !cpu->expose_kvm) {
4994 env->features[FEAT_KVM] = 0;
4995 }
4996
4997 x86_cpu_enable_xsave_components(cpu);
4998
4999 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
5000 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
5001 if (cpu->full_cpuid_auto_level) {
5002 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
5003 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
5004 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
5005 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
5006 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
5007 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
5008 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
5009 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
5010 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
5011 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
5012 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
5013
5014 /* Intel Processor Trace requires CPUID[0x14] */
5015 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
5016 kvm_enabled() && cpu->intel_pt_auto_level) {
5017 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
5018 }
5019
5020 /* CPU topology with multi-dies support requires CPUID[0x1F] */
5021 if (env->nr_dies > 1) {
5022 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
5023 }
5024
5025 /* SVM requires CPUID[0x8000000A] */
5026 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
5027 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
5028 }
5029
5030 /* SEV requires CPUID[0x8000001F] */
5031 if (sev_enabled()) {
5032 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
5033 }
5034 }
5035
5036 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
5037 if (env->cpuid_level == UINT32_MAX) {
5038 env->cpuid_level = env->cpuid_min_level;
5039 }
5040 if (env->cpuid_xlevel == UINT32_MAX) {
5041 env->cpuid_xlevel = env->cpuid_min_xlevel;
5042 }
5043 if (env->cpuid_xlevel2 == UINT32_MAX) {
5044 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
5045 }
5046
5047 out:
5048 if (local_err != NULL) {
5049 error_propagate(errp, local_err);
5050 }
5051 }
5052
5053 /*
5054 * Finishes initialization of CPUID data, filters CPU feature
5055 * words based on host availability of each feature.
5056 *
5057 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
5058 */
5059 static int x86_cpu_filter_features(X86CPU *cpu)
5060 {
5061 CPUX86State *env = &cpu->env;
5062 FeatureWord w;
5063 int rv = 0;
5064
5065 for (w = 0; w < FEATURE_WORDS; w++) {
5066 uint32_t host_feat =
5067 x86_cpu_get_supported_feature_word(w, false);
5068 uint32_t requested_features = env->features[w];
5069 uint32_t available_features = requested_features & host_feat;
5070 if (!cpu->force_features) {
5071 env->features[w] = available_features;
5072 }
5073 cpu->filtered_features[w] = requested_features & ~available_features;
5074 if (cpu->filtered_features[w]) {
5075 rv = 1;
5076 }
5077 }
5078
5079 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
5080 kvm_enabled()) {
5081 KVMState *s = CPU(cpu)->kvm_state;
5082 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
5083 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
5084 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
5085 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
5086 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
5087
5088 if (!eax_0 ||
5089 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
5090 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
5091 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
5092 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
5093 INTEL_PT_ADDR_RANGES_NUM) ||
5094 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
5095 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
5096 (ecx_0 & INTEL_PT_IP_LIP)) {
5097 /*
5098 * Processor Trace capabilities aren't configurable, so if the
5099 * host can't emulate the capabilities we report on
5100 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
5101 */
5102 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
5103 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
5104 rv = 1;
5105 }
5106 }
5107
5108 return rv;
5109 }
5110
/*
 * Device realize handler for X86CPU.
 *
 * Runs the second and third stages of CPU initialization (feature
 * expansion, then CPUID filtering), validates phys-bits, sets up cache
 * info, the APIC, the SMM address space (TCG only), and finally starts
 * the vCPU and chains to the parent class realize.
 *
 * Errors are reported through @errp, either directly or via local_err
 * propagated at the "out" label.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;  /* warn about missing topoext only once per run */

    if (xcc->host_cpuid_required) {
        /* Models like "host" are meaningless without a host-CPUID accel */
        if (!accel_uses_host_cpuid()) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(&local_err, "CPU model '%s' requires KVM", name);
            g_free(name);
            goto out;
        }

        if (enable_cpu_pm) {
            /* Read the host's MWAIT leaf (CPUID.05H) for guest pass-through */
            host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
                       &cpu->mwait.ecx, &cpu->mwait.edx);
            env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
        }
    }

    /* mwait extended info: needed for Core compatibility */
    /* We always wake on interrupt even if host does not have the capability */
    cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /* Stage 2: expand features using host/accelerator data */
    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Stage 3: drop unsupported features; fatal only with enforce=on */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       accel_uses_host_cpuid() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
                /* ...optionally capped by host-phys-bits-limit */
                if (cpu->host_phys_bits_limit &&
                    cpu->phys_bits > cpu->host_phys_bits_limit) {
                    cpu->phys_bits = cpu->host_phys_bits_limit;
                }
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                 cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            /* TCG only models one fixed physical address width */
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        /* legacy-cache=off needs model-provided cache descriptors */
        if (!xcc->model || !xcc->model->cpudef->cache_info) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            g_free(name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->model->cpudef->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }


    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC device is needed with the APIC feature or on SMP guests */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        /* TCG models SMM via a second address space overlaying SMRAM */
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
        warn_report("This family of AMD CPU doesn't support "
                    "hyperthreading(%d)",
                    cs->nr_threads);
        error_printf("Please configure -smp options properly"
                     " or try enabling topoext feature.\n");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
5352
5353 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
5354 {
5355 X86CPU *cpu = X86_CPU(dev);
5356 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5357 Error *local_err = NULL;
5358
5359 #ifndef CONFIG_USER_ONLY
5360 cpu_remove_sync(CPU(dev));
5361 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
5362 #endif
5363
5364 if (cpu->apic_state) {
5365 object_unparent(OBJECT(cpu->apic_state));
5366 cpu->apic_state = NULL;
5367 }
5368
5369 xcc->parent_unrealize(dev, &local_err);
5370 if (local_err != NULL) {
5371 error_propagate(errp, local_err);
5372 return;
5373 }
5374 }
5375
/* Opaque state for a per-feature-bit boolean QOM property */
typedef struct BitProperty {
    FeatureWord w;  /* feature word the property's bit(s) belong to */
    uint32_t mask;  /* bit(s) within that word controlled by the property */
} BitProperty;
5380
5381 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5382 void *opaque, Error **errp)
5383 {
5384 X86CPU *cpu = X86_CPU(obj);
5385 BitProperty *fp = opaque;
5386 uint32_t f = cpu->env.features[fp->w];
5387 bool value = (f & fp->mask) == fp->mask;
5388 visit_type_bool(v, name, &value, errp);
5389 }
5390
5391 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5392 void *opaque, Error **errp)
5393 {
5394 DeviceState *dev = DEVICE(obj);
5395 X86CPU *cpu = X86_CPU(obj);
5396 BitProperty *fp = opaque;
5397 Error *local_err = NULL;
5398 bool value;
5399
5400 if (dev->realized) {
5401 qdev_prop_set_after_realize(dev, name, errp);
5402 return;
5403 }
5404
5405 visit_type_bool(v, name, &value, &local_err);
5406 if (local_err) {
5407 error_propagate(errp, local_err);
5408 return;
5409 }
5410
5411 if (value) {
5412 cpu->env.features[fp->w] |= fp->mask;
5413 } else {
5414 cpu->env.features[fp->w] &= ~fp->mask;
5415 }
5416 cpu->env.user_features[fp->w] |= fp->mask;
5417 }
5418
5419 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5420 void *opaque)
5421 {
5422 BitProperty *prop = opaque;
5423 g_free(prop);
5424 }
5425
5426 /* Register a boolean property to get/set a single bit in a uint32_t field.
5427 *
5428 * The same property name can be registered multiple times to make it affect
5429 * multiple bits in the same FeatureWord. In that case, the getter will return
5430 * true only if all bits are set.
5431 */
5432 static void x86_cpu_register_bit_prop(X86CPU *cpu,
5433 const char *prop_name,
5434 FeatureWord w,
5435 int bitnr)
5436 {
5437 BitProperty *fp;
5438 ObjectProperty *op;
5439 uint32_t mask = (1UL << bitnr);
5440
5441 op = object_property_find(OBJECT(cpu), prop_name, NULL);
5442 if (op) {
5443 fp = op->opaque;
5444 assert(fp->w == w);
5445 fp->mask |= mask;
5446 } else {
5447 fp = g_new0(BitProperty, 1);
5448 fp->w = w;
5449 fp->mask = mask;
5450 object_property_add(OBJECT(cpu), prop_name, "bool",
5451 x86_cpu_get_bit_prop,
5452 x86_cpu_set_bit_prop,
5453 x86_cpu_release_bit_prop, fp, &error_abort);
5454 }
5455 }
5456
5457 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5458 FeatureWord w,
5459 int bitnr)
5460 {
5461 FeatureWordInfo *fi = &feature_word_info[w];
5462 const char *name = fi->feat_names[bitnr];
5463
5464 if (!name) {
5465 return;
5466 }
5467
5468 /* Property names should use "-" instead of "_".
5469 * Old names containing underscores are registered as aliases
5470 * using object_property_add_alias()
5471 */
5472 assert(!strchr(name, '_'));
5473 /* aliases don't use "|" delimiters anymore, they are registered
5474 * manually using object_property_add_alias() */
5475 assert(!strchr(name, '|'));
5476 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5477 }
5478
5479 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5480 {
5481 X86CPU *cpu = X86_CPU(cs);
5482 CPUX86State *env = &cpu->env;
5483 GuestPanicInformation *panic_info = NULL;
5484
5485 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5486 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5487
5488 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
5489
5490 assert(HV_CRASH_PARAMS >= 5);
5491 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5492 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5493 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5494 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5495 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5496 }
5497
5498 return panic_info;
5499 }
5500 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5501 const char *name, void *opaque,
5502 Error **errp)
5503 {
5504 CPUState *cs = CPU(obj);
5505 GuestPanicInformation *panic_info;
5506
5507 if (!cs->crash_occurred) {
5508 error_setg(errp, "No crash occured");
5509 return;
5510 }
5511
5512 panic_info = x86_cpu_get_crash_info(cs);
5513 if (panic_info == NULL) {
5514 error_setg(errp, "No crash information");
5515 return;
5516 }
5517
5518 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5519 errp);
5520 qapi_free_GuestPanicInformation(panic_info);
5521 }
5522
/*
 * QOM instance_init for X86CPU: registers the versioned-CPUID QOM
 * properties, per-feature-bit boolean properties and their legacy
 * aliases, then loads the class's CPU model definition (if any).
 * Stage 1 of CPU initialization; see the lifecycle comment above.
 */
static void x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    /* Default topology: a single die */
    env->nr_dies = 1;
    cpu_set_cpustate_pointers(cpu);

    /* CPUID version fields exposed as read/write integer properties */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only views over the feature word arrays */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);
    /*
     * The "unavailable-features" property has the same semantics as
     * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
     * QMP command: they list the features that would have prevented the
     * CPU from running if the "enforce" flag was set.
     */
    object_property_add(obj, "unavailable-features", "strList",
                        x86_cpu_get_unavailable_features,
                        NULL, NULL, NULL, &error_abort);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    /* One boolean property per named bit in every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Compatibility aliases for historical feature spellings */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Underscore spellings aliased to the canonical dash spellings */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->model) {
        x86_cpu_load_model(cpu, xcc->model, &error_abort);
    }
}
5612
5613 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5614 {
5615 X86CPU *cpu = X86_CPU(cs);
5616
5617 return cpu->apic_id;
5618 }
5619
5620 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5621 {
5622 X86CPU *cpu = X86_CPU(cs);
5623
5624 return cpu->env.cr[0] & CR0_PG_MASK;
5625 }
5626
5627 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5628 {
5629 X86CPU *cpu = X86_CPU(cs);
5630
5631 cpu->env.eip = value;
5632 }
5633
5634 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5635 {
5636 X86CPU *cpu = X86_CPU(cs);
5637
5638 cpu->env.eip = tb->pc - tb->cs_base;
5639 }
5640
/*
 * Return the highest-priority interrupt type from @interrupt_request
 * that can be delivered in the CPU's current state, or 0 if none.
 *
 * Priority order as implemented below: POLL (system emulation only),
 * SIPI, then — only while GIF is set — SMI, NMI, MCE, HARD, VIRQ.
 */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    /* Everything below is blocked while the Global Interrupt Flag is clear */
    if (env->hflags2 & HF2_GIF_MASK) {
        /* SMI is blocked while already in SMM */
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            return CPU_INTERRUPT_SMI;
        /* NMI is blocked while a previous NMI is being handled */
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        /* Hard interrupts honor either virtual-intr masking (under SVM)
         * or the architectural IF/interrupt-shadow rules */
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}
5682
5683 static bool x86_cpu_has_work(CPUState *cs)
5684 {
5685 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
5686 }
5687
5688 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5689 {
5690 X86CPU *cpu = X86_CPU(cs);
5691 CPUX86State *env = &cpu->env;
5692
5693 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5694 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5695 : bfd_mach_i386_i8086);
5696 info->print_insn = print_insn_i386;
5697
5698 info->cap_arch = CS_ARCH_X86;
5699 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5700 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5701 : CS_MODE_16);
5702 info->cap_insn_unit = 1;
5703 info->cap_insn_split = 8;
5704 }
5705
/*
 * Recompute the derived env->hflags bits from the authoritative CPU
 * state (segment registers, CR0/CR4, EFER, EFLAGS). Bits outside
 * HFLAG_COPY_MASK are recalculated; the rest are preserved.
 */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    /* Keep bits not derived here, then rebuild the derived ones */
    hflags = env->hflags & HFLAG_COPY_MASK;
    /* CPL comes from the DPL field of SS */
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    /* CR0.MP/EM/TS map directly into the corresponding hflags bits */
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        /* Long mode with a 64-bit code segment */
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        /* Derive CS32/SS32 from the segment descriptors' B bit */
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            /* ADDSEG only needed when a data/stack segment base is nonzero */
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}
5747
/* QOM/qdev properties of X86CPU (topology IDs, Hyper-V enlightenments,
 * CPUID level overrides, and assorted compatibility knobs). */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* System emulation: topology IDs start unassigned (-1) */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),

    /* Hyper-V enlightenments (hv-*), all disabled by default */
    DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
                       HYPERV_SPINLOCK_NEVER_RETRY),
    DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
                      HYPERV_FEAT_RELAXED, 0),
    DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
                      HYPERV_FEAT_VAPIC, 0),
    DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
                      HYPERV_FEAT_TIME, 0),
    DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
                      HYPERV_FEAT_CRASH, 0),
    DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
                      HYPERV_FEAT_RESET, 0),
    DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
                      HYPERV_FEAT_VPINDEX, 0),
    DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
                      HYPERV_FEAT_RUNTIME, 0),
    DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
                      HYPERV_FEAT_SYNIC, 0),
    DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER, 0),
    DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
                      HYPERV_FEAT_FREQUENCIES, 0),
    DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
                      HYPERV_FEAT_REENLIGHTENMENT, 0),
    DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
                      HYPERV_FEAT_TLBFLUSH, 0),
    DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
                      HYPERV_FEAT_EVMCS, 0),
    DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
                      HYPERV_FEAT_IPI, 0),
    DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER_DIRECT, 0),
    DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),

    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX means "not set"; resolved from min-* in expand_features */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
                     true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
                     true),
    DEFINE_PROP_END_OF_LIST()
};
5850
/*
 * Class init for the abstract TYPE_X86_CPU base type.
 *
 * Installs the x86-specific implementations of the generic DeviceClass
 * and CPUClass hooks (realize/unrealize, reset, interrupt handling,
 * state dumping, gdbstub access, ELF note writing, TCG entry points).
 * TCG-only and softmmu-only hooks are guarded by CONFIG_TCG /
 * CONFIG_USER_ONLY as appropriate.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    /* Save the parent's reset handler before overriding it, so
     * x86_cpu_reset() can chain to it. */
    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifndef CONFIG_USER_ONLY
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
    /* gdb register count differs between 64-bit and 32-bit targets */
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 66;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 50;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
    cc->tlb_fill = x86_cpu_tlb_fill;
#endif
    cc->disas_set_info = x86_disas_set_info;

    dc->user_creatable = true;
}
5913
5914 static const TypeInfo x86_cpu_type_info = {
5915 .name = TYPE_X86_CPU,
5916 .parent = TYPE_CPU,
5917 .instance_size = sizeof(X86CPU),
5918 .instance_init = x86_cpu_initfn,
5919 .abstract = true,
5920 .class_size = sizeof(X86CPUClass),
5921 .class_init = x86_cpu_common_class_init,
5922 };
5923
5924
5925 /* "base" CPU model, used by query-cpu-model-expansion */
5926 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
5927 {
5928 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5929
5930 xcc->static_model = true;
5931 xcc->migration_safe = true;
5932 xcc->model_description = "base CPU model type with no features enabled";
5933 xcc->ordering = 8;
5934 }
5935
5936 static const TypeInfo x86_base_cpu_type_info = {
5937 .name = X86_CPU_TYPE_NAME("base"),
5938 .parent = TYPE_X86_CPU,
5939 .class_init = x86_cpu_base_class_init,
5940 };
5941
5942 static void x86_cpu_register_types(void)
5943 {
5944 int i;
5945
5946 type_register_static(&x86_cpu_type_info);
5947 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5948 x86_register_cpudef_types(&builtin_x86_defs[i]);
5949 }
5950 type_register_static(&max_x86_cpu_type_info);
5951 type_register_static(&x86_base_cpu_type_info);
5952 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5953 type_register_static(&host_x86_cpu_type_info);
5954 #endif
5955 }
5956
5957 type_init(x86_cpu_register_types)