]> git.proxmox.com Git - mirror_qemu.git/blob - target/i386/cpu.c
Merge remote-tracking branch 'remotes/vivier2/tags/trivial-patches-pull-request'...
[mirror_qemu.git] / target / i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24
25 #include "cpu.h"
26 #include "exec/exec-all.h"
27 #include "sysemu/kvm.h"
28 #include "sysemu/hvf.h"
29 #include "sysemu/cpus.h"
30 #include "kvm_i386.h"
31 #include "sev_i386.h"
32
33 #include "qemu/error-report.h"
34 #include "qemu/option.h"
35 #include "qemu/config-file.h"
36 #include "qapi/error.h"
37 #include "qapi/qapi-visit-misc.h"
38 #include "qapi/qapi-visit-run-state.h"
39 #include "qapi/qmp/qdict.h"
40 #include "qapi/qmp/qerror.h"
41 #include "qapi/visitor.h"
42 #include "qom/qom-qobject.h"
43 #include "sysemu/arch_init.h"
44
45 #include "standard-headers/asm-x86/kvm_para.h"
46
47 #include "sysemu/sysemu.h"
48 #include "hw/qdev-properties.h"
49 #include "hw/i386/topology.h"
50 #ifndef CONFIG_USER_ONLY
51 #include "exec/address-spaces.h"
52 #include "hw/hw.h"
53 #include "hw/xen/xen.h"
54 #include "hw/i386/apic_internal.h"
55 #endif
56
57 #include "disas/capstone.h"
58
59 /* Helpers for building CPUID[2] descriptors: */
60
/*
 * Parameters identifying one CPUID[2] cache descriptor: the cache's
 * type, level and geometry, used to look up its one-byte descriptor.
 */
struct CPUID2CacheDescriptorInfo {
    enum CacheType type;
    int level;
    int size;           /* total cache size, in bytes */
    int line_size;      /* cache line size, in bytes */
    int associativity;  /* number of ways */
};
68
/*
 * Known CPUID 2 cache descriptors.
 * From Intel SDM Volume 2A, CPUID instruction.
 *
 * Sparse array indexed by the one-byte CPUID[2] descriptor value.
 * Unlisted indexes are zero-filled; they can never match a lookup in
 * cpuid2_cache_descriptor(), which asserts cache->size > 0.
 */
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
    [0x06] = { .level = 1, .type = INSTRUCTION_CACHE,        .size =   8 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x08] = { .level = 1, .type = INSTRUCTION_CACHE,        .size =  16 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x09] = { .level = 1, .type = INSTRUCTION_CACHE,        .size =  32 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x0A] = { .level = 1, .type = DATA_CACHE,        .size =   8 * KiB,
               .associativity = 2,  .line_size = 32, },
    [0x0C] = { .level = 1, .type = DATA_CACHE,        .size =  16 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x0D] = { .level = 1, .type = DATA_CACHE,        .size =  16 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x0E] = { .level = 1, .type = DATA_CACHE,        .size =  24 * KiB,
               .associativity = 6,  .line_size = 64, },
    [0x1D] = { .level = 2, .type = UNIFIED_CACHE,     .size = 128 * KiB,
               .associativity = 2,  .line_size = 64, },
    [0x21] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 8,  .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x22, 0x23 are not included
     */
    [0x24] = { .level = 2, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 16, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x25, 0x20 are not included
     */
    [0x2C] = { .level = 1, .type = DATA_CACHE,        .size =  32 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x30] = { .level = 1, .type = INSTRUCTION_CACHE,        .size =  32 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x41] = { .level = 2, .type = UNIFIED_CACHE,     .size = 128 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x42] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x43] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x44] = { .level = 2, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 4,  .line_size = 32, },
    [0x45] = { .level = 2, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 4,  .line_size = 32, },
    [0x46] = { .level = 3, .type = UNIFIED_CACHE,     .size =   4 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0x47] = { .level = 3, .type = UNIFIED_CACHE,     .size =   8 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0x48] = { .level = 2, .type = UNIFIED_CACHE,     .size =   3 * MiB,
               .associativity = 12, .line_size = 64, },
    /* Descriptor 0x49 depends on CPU family/model, so it is not included */
    [0x4A] = { .level = 3, .type = UNIFIED_CACHE,     .size =   6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4B] = { .level = 3, .type = UNIFIED_CACHE,     .size =   8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4C] = { .level = 3, .type = UNIFIED_CACHE,     .size =  12 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4D] = { .level = 3, .type = UNIFIED_CACHE,     .size =  16 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4E] = { .level = 2, .type = UNIFIED_CACHE,     .size =   6 * MiB,
               .associativity = 24, .line_size = 64, },
    [0x60] = { .level = 1, .type = DATA_CACHE,        .size =  16 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x66] = { .level = 1, .type = DATA_CACHE,        .size =   8 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x67] = { .level = 1, .type = DATA_CACHE,        .size =  16 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x68] = { .level = 1, .type = DATA_CACHE,        .size =  32 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x78] = { .level = 2, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 4,  .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
     */
    [0x7D] = { .level = 2, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0x7F] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 2,  .line_size = 64, },
    [0x80] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x82] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 8,  .line_size = 32, },
    [0x83] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 8,  .line_size = 32, },
    [0x84] = { .level = 2, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 8,  .line_size = 32, },
    [0x85] = { .level = 2, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 8,  .line_size = 32, },
    [0x86] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x87] = { .level = 2, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD0] = { .level = 3, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0xD1] = { .level = 3, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0xD2] = { .level = 3, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0xD6] = { .level = 3, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD7] = { .level = 3, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD8] = { .level = 3, .type = UNIFIED_CACHE,     .size =   4 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xDC] = { .level = 3, .type = UNIFIED_CACHE,     .size = 1.5 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDD] = { .level = 3, .type = UNIFIED_CACHE,     .size =   3 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDE] = { .level = 3, .type = UNIFIED_CACHE,     .size =   6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xE2] = { .level = 3, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE3] = { .level = 3, .type = UNIFIED_CACHE,     .size =   4 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE4] = { .level = 3, .type = UNIFIED_CACHE,     .size =   8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xEA] = { .level = 3, .type = UNIFIED_CACHE,     .size =  12 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEB] = { .level = 3, .type = UNIFIED_CACHE,     .size =  18 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEC] = { .level = 3, .type = UNIFIED_CACHE,     .size =  24 * MiB,
               .associativity = 24, .line_size = 64, },
};
193
194 /*
195 * "CPUID leaf 2 does not report cache descriptor information,
196 * use CPUID leaf 4 to query cache parameters"
197 */
198 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
199
200 /*
201 * Return a CPUID 2 cache descriptor for a given cache.
202 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
203 */
204 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
205 {
206 int i;
207
208 assert(cache->size > 0);
209 assert(cache->level > 0);
210 assert(cache->line_size > 0);
211 assert(cache->associativity > 0);
212 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
213 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
214 if (d->level == cache->level && d->type == cache->type &&
215 d->size == cache->size && d->line_size == cache->line_size &&
216 d->associativity == cache->associativity) {
217 return i;
218 }
219 }
220
221 return CACHE_DESCRIPTOR_UNAVAILABLE;
222 }
223
/* CPUID Leaf 4 constants: */

/* EAX: cache type field values (bits 4:0) */
#define CACHE_TYPE_D 1
#define CACHE_TYPE_I 2
#define CACHE_TYPE_UNIFIED 3

/*
 * EAX: cache level goes in bits 7:5.
 * The argument is parenthesized so compound expressions such as
 * CACHE_LEVEL(x + 1) expand correctly (the original `l << 5` would
 * misparse them because + binds tighter than <<).
 */
#define CACHE_LEVEL(l) ((l) << 5)

/* EAX bit 8: cache is self-initializing */
#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING (1 << 0)
#define CACHE_INCLUSIVE (1 << 1)
#define CACHE_COMPLEX_IDX (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
                       ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)
245
246
247 /* Encode cache info for CPUID[4] */
248 static void encode_cache_cpuid4(CPUCacheInfo *cache,
249 int num_apic_ids, int num_cores,
250 uint32_t *eax, uint32_t *ebx,
251 uint32_t *ecx, uint32_t *edx)
252 {
253 assert(cache->size == cache->line_size * cache->associativity *
254 cache->partitions * cache->sets);
255
256 assert(num_apic_ids > 0);
257 *eax = CACHE_TYPE(cache->type) |
258 CACHE_LEVEL(cache->level) |
259 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
260 ((num_cores - 1) << 26) |
261 ((num_apic_ids - 1) << 14);
262
263 assert(cache->line_size > 0);
264 assert(cache->partitions > 0);
265 assert(cache->associativity > 0);
266 /* We don't implement fully-associative caches */
267 assert(cache->associativity < cache->sets);
268 *ebx = (cache->line_size - 1) |
269 ((cache->partitions - 1) << 12) |
270 ((cache->associativity - 1) << 22);
271
272 assert(cache->sets > 0);
273 *ecx = cache->sets - 1;
274
275 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
276 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
277 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
278 }
279
280 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
281 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
282 {
283 assert(cache->size % 1024 == 0);
284 assert(cache->lines_per_tag > 0);
285 assert(cache->associativity > 0);
286 assert(cache->line_size > 0);
287 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
288 (cache->lines_per_tag << 8) | (cache->line_size);
289 }
290
#define ASSOC_FULL 0xFF

/*
 * AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a ways count to the 4-bit field value; 0 and 1 encode
 * themselves, unknown counts encode as 0 (invalid/disabled).
 * Every use of the argument is parenthesized so compound
 * expressions (e.g. ternaries) expand correctly.
 */
#define AMD_ENC_ASSOC(a) ((a) <= 1 ? (a) : \
                          (a) == 2 ? 0x2 : \
                          (a) == 4 ? 0x4 : \
                          (a) == 8 ? 0x6 : \
                          (a) == 16 ? 0x8 : \
                          (a) == 32 ? 0xA : \
                          (a) == 48 ? 0xB : \
                          (a) == 64 ? 0xC : \
                          (a) == 96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
306
307 /*
308 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
309 * @l3 can be NULL.
310 */
311 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
312 CPUCacheInfo *l3,
313 uint32_t *ecx, uint32_t *edx)
314 {
315 assert(l2->size % 1024 == 0);
316 assert(l2->associativity > 0);
317 assert(l2->lines_per_tag > 0);
318 assert(l2->line_size > 0);
319 *ecx = ((l2->size / 1024) << 16) |
320 (AMD_ENC_ASSOC(l2->associativity) << 12) |
321 (l2->lines_per_tag << 8) | (l2->line_size);
322
323 if (l3) {
324 assert(l3->size % (512 * 1024) == 0);
325 assert(l3->associativity > 0);
326 assert(l3->lines_per_tag > 0);
327 assert(l3->line_size > 0);
328 *edx = ((l3->size / (512 * 1024)) << 18) |
329 (AMD_ENC_ASSOC(l3->associativity) << 12) |
330 (l3->lines_per_tag << 8) | (l3->line_size);
331 } else {
332 *edx = 0;
333 }
334 }
335
336 /*
337 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
338 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
339 * Define the constants to build the cpu topology. Right now, TOPOEXT
340 * feature is enabled only on EPYC. So, these constants are based on
341 * EPYC supported configurations. We may need to handle the cases if
342 * these values change in future.
343 */
/* Maximum core complexes in a node */
#define MAX_CCX 2
/* Maximum cores in a core complex */
#define MAX_CORES_IN_CCX 4
/* Maximum cores in a node */
#define MAX_CORES_IN_NODE 8
/* Maximum nodes in a socket */
#define MAX_NODES_PER_SOCKET 4

/*
 * Number of nodes required to hold @nr_cores cores, with at most
 * MAX_CORES_IN_NODE cores per node (i.e. ceil(nr_cores / 8)).
 * EPYC hardware has no 3-node configuration, so 3 rounds up to 4.
 */
static int nodes_in_socket(int nr_cores)
{
    int nodes = (nr_cores + MAX_CORES_IN_NODE - 1) / MAX_CORES_IN_NODE;

    /* Hardware does not support config with 3 nodes, return 4 in that case */
    return nodes == 3 ? 4 : nodes;
}
366
367 /*
368 * Decide the number of cores in a core complex with the given nr_cores using
369 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
370 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
371 * L3 cache is shared across all cores in a core complex. So, this will also
372 * tell us how many cores are sharing the L3 cache.
373 */
374 static int cores_in_core_complex(int nr_cores)
375 {
376 int nodes;
377
378 /* Check if we can fit all the cores in one core complex */
379 if (nr_cores <= MAX_CORES_IN_CCX) {
380 return nr_cores;
381 }
382 /* Get the number of nodes required to build this config */
383 nodes = nodes_in_socket(nr_cores);
384
385 /*
386 * Divide the cores accros all the core complexes
387 * Return rounded up value
388 */
389 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
390 }
391
392 /* Encode cache info for CPUID[8000001D] */
393 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
394 uint32_t *eax, uint32_t *ebx,
395 uint32_t *ecx, uint32_t *edx)
396 {
397 uint32_t l3_cores;
398 assert(cache->size == cache->line_size * cache->associativity *
399 cache->partitions * cache->sets);
400
401 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
402 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
403
404 /* L3 is shared among multiple cores */
405 if (cache->level == 3) {
406 l3_cores = cores_in_core_complex(cs->nr_cores);
407 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
408 } else {
409 *eax |= ((cs->nr_threads - 1) << 14);
410 }
411
412 assert(cache->line_size > 0);
413 assert(cache->partitions > 0);
414 assert(cache->associativity > 0);
415 /* We don't implement fully-associative caches */
416 assert(cache->associativity < cache->sets);
417 *ebx = (cache->line_size - 1) |
418 ((cache->partitions - 1) << 12) |
419 ((cache->associativity - 1) << 22);
420
421 assert(cache->sets > 0);
422 *ecx = cache->sets - 1;
423
424 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
425 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
426 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
427 }
428
/* Data structure to hold the configuration info for a given core index */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology
     * This can be 0,1,2,3 with max 4 cores in a core complex
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};
443
444 /*
445 * Build the configuration closely match the EPYC hardware. Using the EPYC
446 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
447 * right now. This could change in future.
448 * nr_cores : Total number of cores in the config
449 * core_id : Core index of the current CPU
450 * topo : Data structure to hold all the config info for this core index
451 */
452 static void build_core_topology(int nr_cores, int core_id,
453 struct core_topology *topo)
454 {
455 int nodes, cores_in_ccx;
456
457 /* First get the number of nodes required */
458 nodes = nodes_in_socket(nr_cores);
459
460 cores_in_ccx = cores_in_core_complex(nr_cores);
461
462 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
463 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
464 topo->core_id = core_id % cores_in_ccx;
465 topo->num_nodes = nodes;
466 }
467
/* Encode CPU topology info for CPUID[0x8000001E] (APIC/core/node ids) */
static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
{
    struct core_topology topo = {0};
    unsigned long nodes;    /* unsigned long: find_last_bit() scans a ulong bitmap */
    int shift;

    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
    *eax = cpu->apic_id;
    /*
     * CPUID_Fn8000001E_EBX
     * 31:16 Reserved
     * 15:8  Threads per core (The number of threads per core is
     *       Threads per core + 1)
     *  7:0  Core id (see bit decoding below)
     *       SMT:
     *           4:3 node id
     *             2 Core complex id
     *           1:0 Core id
     *       Non SMT:
     *           5:4 node id
     *             3 Core complex id
     *           1:0 Core id
     */
    if (cs->nr_threads - 1) {
        /* SMT: threads-per-core field is non-zero, use the packed SMT layout */
        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
               (topo.ccx_id << 2) | topo.core_id;
    } else {
        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
    }
    /*
     * CPUID_Fn8000001E_ECX
     * 31:11 Reserved
     * 10:8  Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0  Node id (see bit decoding below)
     *         2  Socket id
     *       1:0  Node id
     */
    if (topo.num_nodes <= 4) {
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
               topo.node_id;
    } else {
        /*
         * Node id fix up. Actual hardware supports up to 4 nodes. But with
         * more than 32 cores, we may end up with more than 4 nodes.
         * Node id is a combination of socket id and node id. Only requirement
         * here is that this number should be unique across the system.
         * Shift the socket id to accommodate more nodes. We dont expect both
         * socket id and node id to be big numbers at the same time. This is
         * not an ideal config but we need to support it. Max nodes we can
         * have is 32 (255/8) with 8 cores per node and 255 max cores. We only
         * need 5 bits for nodes. Find the left most set bit to represent the
         * total number of nodes. find_last_bit returns last set bit(0 based).
         * Left shift(+1) the socket id to represent all the nodes.
         */
        nodes = topo.num_nodes - 1;
        shift = find_last_bit(&nodes, 8);
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
               topo.node_id;
    }
    *edx = 0;
}
532
/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,         /* 64 sets * 8 ways * 64 B lines = 32 KiB */
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,        /* 512 sets * 2 ways * 64 B lines = 64 KiB */
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};
565
/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,         /* 64 sets * 8 ways * 64 B lines = 32 KiB */
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,        /* 512 sets * 2 ways * 64 B lines = 64 KiB */
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};
592
/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,       /* 4096 sets * 16 ways * 64 B lines = 4 MiB */
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
    /*
     * NOTE(review): no .sets/.partitions here, so only the CPUID leaf-2
     * descriptor fields are usable — presumably this entry is never fed
     * to encode_cache_cpuid4(); confirm against the callers.
     */
};


/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,        /* 512 sets * 16 ways * 64 B lines = 512 KiB */
    .partitions = 1,
};
627
/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,      /* 16384 sets * 16 ways * 64 B lines = 16 MiB */
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};
642
/*
 * TLB definitions:
 * Fixed associativity/entry counts for the L1 and L2 TLBs; an
 * associativity of 0 marks the TLB as disabled.
 * NOTE(review): presumably reported via the AMD CPUID leaves
 * (0x80000005/0x80000006) — confirm against the users of these macros.
 */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
664
/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF 0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX 0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX 0x7
/*
 * Generated packets which contain IP payloads have LIP values.
 * Use an unsigned constant: `1 << 31` shifts into the sign bit of a
 * 32-bit int, which is undefined behavior in C.
 */
#define INTEL_PT_IP_LIP (1U << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
693
694 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
695 uint32_t vendor2, uint32_t vendor3)
696 {
697 int i;
698 for (i = 0; i < 4; i++) {
699 dst[i] = vendor1 >> (8 * i);
700 dst[i + 4] = vendor2 >> (8 * i);
701 dst[i + 8] = vendor3 >> (8 * i);
702 }
703 dst[CPUID_VENDOR_SZ] = '\0';
704 }
705
/* CPUID[1].EDX feature sets for the classic named CPU models: */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* Feature bits (per feature word) that the TCG emulator can implement: */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

/* 64-bit-only extended feature bits, masked out on 32-bit-only builds: */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES CPUID_SVM_NPT
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
          /* CPUID_7_0_ECX_OSPKE is dynamic */ \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
772
/* How a feature word is queried from the host: a CPUID leaf or an MSR */
typedef enum FeatureWordType {
    CPUID_FEATURE_WORD,
    MSR_FEATURE_WORD,
} FeatureWordType;
777
/*
 * Metadata describing one feature word: where its bits come from
 * (CPUID register or MSR), the per-bit feature names, and policy
 * masks controlling TCG support, migratability and auto-enabling.
 */
typedef struct FeatureWordInfo {
    FeatureWordType type;
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];   /* one name per bit; NULL = unnamed bit */
    union {
        /* If type==CPUID_FEATURE_WORD */
        struct {
            uint32_t eax;   /* Input EAX for CPUID */
            bool needs_ecx; /* CPUID instruction uses ECX as input */
            uint32_t ecx;   /* Input ECX value for CPUID */
            int reg;        /* output register (R_* constant) */
        } cpuid;
        /* If type==MSR_FEATURE_WORD */
        struct {
            uint32_t index; /* MSR number to read */
            struct {        /* CPUID that enumerates this MSR */
                FeatureWord cpuid_class;
                uint32_t cpuid_flag;
            } cpuid_dep;
        } msr;
    };
    uint32_t tcg_features;       /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags;   /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
} FeatureWordInfo;
809
810 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
811 [FEAT_1_EDX] = {
812 .type = CPUID_FEATURE_WORD,
813 .feat_names = {
814 "fpu", "vme", "de", "pse",
815 "tsc", "msr", "pae", "mce",
816 "cx8", "apic", NULL, "sep",
817 "mtrr", "pge", "mca", "cmov",
818 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
819 NULL, "ds" /* Intel dts */, "acpi", "mmx",
820 "fxsr", "sse", "sse2", "ss",
821 "ht" /* Intel htt */, "tm", "ia64", "pbe",
822 },
823 .cpuid = {.eax = 1, .reg = R_EDX, },
824 .tcg_features = TCG_FEATURES,
825 },
826 [FEAT_1_ECX] = {
827 .type = CPUID_FEATURE_WORD,
828 .feat_names = {
829 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
830 "ds-cpl", "vmx", "smx", "est",
831 "tm2", "ssse3", "cid", NULL,
832 "fma", "cx16", "xtpr", "pdcm",
833 NULL, "pcid", "dca", "sse4.1",
834 "sse4.2", "x2apic", "movbe", "popcnt",
835 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
836 "avx", "f16c", "rdrand", "hypervisor",
837 },
838 .cpuid = { .eax = 1, .reg = R_ECX, },
839 .tcg_features = TCG_EXT_FEATURES,
840 },
841 /* Feature names that are already defined on feature_name[] but
842 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
843 * names on feat_names below. They are copied automatically
844 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
845 */
846 [FEAT_8000_0001_EDX] = {
847 .type = CPUID_FEATURE_WORD,
848 .feat_names = {
849 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
850 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
851 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
852 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
853 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
854 "nx", NULL, "mmxext", NULL /* mmx */,
855 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
856 NULL, "lm", "3dnowext", "3dnow",
857 },
858 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
859 .tcg_features = TCG_EXT2_FEATURES,
860 },
861 [FEAT_8000_0001_ECX] = {
862 .type = CPUID_FEATURE_WORD,
863 .feat_names = {
864 "lahf-lm", "cmp-legacy", "svm", "extapic",
865 "cr8legacy", "abm", "sse4a", "misalignsse",
866 "3dnowprefetch", "osvw", "ibs", "xop",
867 "skinit", "wdt", NULL, "lwp",
868 "fma4", "tce", NULL, "nodeid-msr",
869 NULL, "tbm", "topoext", "perfctr-core",
870 "perfctr-nb", NULL, NULL, NULL,
871 NULL, NULL, NULL, NULL,
872 },
873 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
874 .tcg_features = TCG_EXT3_FEATURES,
875 /*
876 * TOPOEXT is always allowed but can't be enabled blindly by
877 * "-cpu host", as it requires consistent cache topology info
878 * to be provided so it doesn't confuse guests.
879 */
880 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
881 },
882 [FEAT_C000_0001_EDX] = {
883 .type = CPUID_FEATURE_WORD,
884 .feat_names = {
885 NULL, NULL, "xstore", "xstore-en",
886 NULL, NULL, "xcrypt", "xcrypt-en",
887 "ace2", "ace2-en", "phe", "phe-en",
888 "pmm", "pmm-en", NULL, NULL,
889 NULL, NULL, NULL, NULL,
890 NULL, NULL, NULL, NULL,
891 NULL, NULL, NULL, NULL,
892 NULL, NULL, NULL, NULL,
893 },
894 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
895 .tcg_features = TCG_EXT4_FEATURES,
896 },
897 [FEAT_KVM] = {
898 .type = CPUID_FEATURE_WORD,
899 .feat_names = {
900 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
901 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
902 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
903 NULL, NULL, NULL, NULL,
904 NULL, NULL, NULL, NULL,
905 NULL, NULL, NULL, NULL,
906 "kvmclock-stable-bit", NULL, NULL, NULL,
907 NULL, NULL, NULL, NULL,
908 },
909 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
910 .tcg_features = TCG_KVM_FEATURES,
911 },
912 [FEAT_KVM_HINTS] = {
913 .type = CPUID_FEATURE_WORD,
914 .feat_names = {
915 "kvm-hint-dedicated", NULL, NULL, NULL,
916 NULL, NULL, NULL, NULL,
917 NULL, NULL, NULL, NULL,
918 NULL, NULL, NULL, NULL,
919 NULL, NULL, NULL, NULL,
920 NULL, NULL, NULL, NULL,
921 NULL, NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 },
924 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
925 .tcg_features = TCG_KVM_FEATURES,
926 /*
927 * KVM hints aren't auto-enabled by -cpu host, they need to be
928 * explicitly enabled in the command-line.
929 */
930 .no_autoenable_flags = ~0U,
931 },
932 /*
933 * .feat_names are commented out for Hyper-V enlightenments because we
934 * don't want to have two different ways for enabling them on QEMU command
935 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
936 * enabling several feature bits simultaneously, exposing these bits
937 * individually may just confuse guests.
938 */
939 [FEAT_HYPERV_EAX] = {
940 .type = CPUID_FEATURE_WORD,
941 .feat_names = {
942 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
943 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
944 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
945 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
946 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
947 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
948 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
949 NULL, NULL,
950 NULL, NULL, NULL, NULL,
951 NULL, NULL, NULL, NULL,
952 NULL, NULL, NULL, NULL,
953 NULL, NULL, NULL, NULL,
954 },
955 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
956 },
957 [FEAT_HYPERV_EBX] = {
958 .type = CPUID_FEATURE_WORD,
959 .feat_names = {
960 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
961 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
962 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
963 NULL /* hv_create_port */, NULL /* hv_connect_port */,
964 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
965 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
966 NULL, NULL,
967 NULL, NULL, NULL, NULL,
968 NULL, NULL, NULL, NULL,
969 NULL, NULL, NULL, NULL,
970 NULL, NULL, NULL, NULL,
971 },
972 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
973 },
974 [FEAT_HYPERV_EDX] = {
975 .type = CPUID_FEATURE_WORD,
976 .feat_names = {
977 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
978 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
979 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
980 NULL, NULL,
981 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
982 NULL, NULL, NULL, NULL,
983 NULL, NULL, NULL, NULL,
984 NULL, NULL, NULL, NULL,
985 NULL, NULL, NULL, NULL,
986 NULL, NULL, NULL, NULL,
987 },
988 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
989 },
990 [FEAT_HV_RECOMM_EAX] = {
991 .type = CPUID_FEATURE_WORD,
992 .feat_names = {
993 NULL /* hv_recommend_pv_as_switch */,
994 NULL /* hv_recommend_pv_tlbflush_local */,
995 NULL /* hv_recommend_pv_tlbflush_remote */,
996 NULL /* hv_recommend_msr_apic_access */,
997 NULL /* hv_recommend_msr_reset */,
998 NULL /* hv_recommend_relaxed_timing */,
999 NULL /* hv_recommend_dma_remapping */,
1000 NULL /* hv_recommend_int_remapping */,
1001 NULL /* hv_recommend_x2apic_msrs */,
1002 NULL /* hv_recommend_autoeoi_deprecation */,
1003 NULL /* hv_recommend_pv_ipi */,
1004 NULL /* hv_recommend_ex_hypercalls */,
1005 NULL /* hv_hypervisor_is_nested */,
1006 NULL /* hv_recommend_int_mbec */,
1007 NULL /* hv_recommend_evmcs */,
1008 NULL,
1009 NULL, NULL, NULL, NULL,
1010 NULL, NULL, NULL, NULL,
1011 NULL, NULL, NULL, NULL,
1012 NULL, NULL, NULL, NULL,
1013 },
1014 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
1015 },
1016 [FEAT_HV_NESTED_EAX] = {
1017 .type = CPUID_FEATURE_WORD,
1018 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
1019 },
1020 [FEAT_SVM] = {
1021 .type = CPUID_FEATURE_WORD,
1022 .feat_names = {
1023 "npt", "lbrv", "svm-lock", "nrip-save",
1024 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1025 NULL, NULL, "pause-filter", NULL,
1026 "pfthreshold", NULL, NULL, NULL,
1027 NULL, NULL, NULL, NULL,
1028 NULL, NULL, NULL, NULL,
1029 NULL, NULL, NULL, NULL,
1030 NULL, NULL, NULL, NULL,
1031 },
1032 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
1033 .tcg_features = TCG_SVM_FEATURES,
1034 },
1035 [FEAT_7_0_EBX] = {
1036 .type = CPUID_FEATURE_WORD,
1037 .feat_names = {
1038 "fsgsbase", "tsc-adjust", NULL, "bmi1",
1039 "hle", "avx2", NULL, "smep",
1040 "bmi2", "erms", "invpcid", "rtm",
1041 NULL, NULL, "mpx", NULL,
1042 "avx512f", "avx512dq", "rdseed", "adx",
1043 "smap", "avx512ifma", "pcommit", "clflushopt",
1044 "clwb", "intel-pt", "avx512pf", "avx512er",
1045 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1046 },
1047 .cpuid = {
1048 .eax = 7,
1049 .needs_ecx = true, .ecx = 0,
1050 .reg = R_EBX,
1051 },
1052 .tcg_features = TCG_7_0_EBX_FEATURES,
1053 },
1054 [FEAT_7_0_ECX] = {
1055 .type = CPUID_FEATURE_WORD,
1056 .feat_names = {
1057 NULL, "avx512vbmi", "umip", "pku",
1058 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
1059 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1060 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
1061 "la57", NULL, NULL, NULL,
1062 NULL, NULL, "rdpid", NULL,
1063 NULL, "cldemote", NULL, "movdiri",
1064 "movdir64b", NULL, NULL, NULL,
1065 },
1066 .cpuid = {
1067 .eax = 7,
1068 .needs_ecx = true, .ecx = 0,
1069 .reg = R_ECX,
1070 },
1071 .tcg_features = TCG_7_0_ECX_FEATURES,
1072 },
1073 [FEAT_7_0_EDX] = {
1074 .type = CPUID_FEATURE_WORD,
1075 .feat_names = {
1076 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1077 NULL, NULL, NULL, NULL,
1078 NULL, NULL, NULL, NULL,
1079 NULL, NULL, NULL, NULL,
1080 NULL, NULL, NULL, NULL,
1081 NULL, NULL, NULL, NULL,
1082 NULL, NULL, "spec-ctrl", "stibp",
1083 NULL, "arch-capabilities", NULL, "ssbd",
1084 },
1085 .cpuid = {
1086 .eax = 7,
1087 .needs_ecx = true, .ecx = 0,
1088 .reg = R_EDX,
1089 },
1090 .tcg_features = TCG_7_0_EDX_FEATURES,
1091 .unmigratable_flags = CPUID_7_0_EDX_ARCH_CAPABILITIES,
1092 },
1093 [FEAT_8000_0007_EDX] = {
1094 .type = CPUID_FEATURE_WORD,
1095 .feat_names = {
1096 NULL, NULL, NULL, NULL,
1097 NULL, NULL, NULL, NULL,
1098 "invtsc", NULL, NULL, NULL,
1099 NULL, NULL, NULL, NULL,
1100 NULL, NULL, NULL, NULL,
1101 NULL, NULL, NULL, NULL,
1102 NULL, NULL, NULL, NULL,
1103 NULL, NULL, NULL, NULL,
1104 },
1105 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1106 .tcg_features = TCG_APM_FEATURES,
1107 .unmigratable_flags = CPUID_APM_INVTSC,
1108 },
1109 [FEAT_8000_0008_EBX] = {
1110 .type = CPUID_FEATURE_WORD,
1111 .feat_names = {
1112 NULL, NULL, NULL, NULL,
1113 NULL, NULL, NULL, NULL,
1114 NULL, "wbnoinvd", NULL, NULL,
1115 "ibpb", NULL, NULL, NULL,
1116 NULL, NULL, NULL, NULL,
1117 NULL, NULL, NULL, NULL,
1118 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1119 NULL, NULL, NULL, NULL,
1120 },
1121 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1122 .tcg_features = 0,
1123 .unmigratable_flags = 0,
1124 },
1125 [FEAT_XSAVE] = {
1126 .type = CPUID_FEATURE_WORD,
1127 .feat_names = {
1128 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1129 NULL, NULL, NULL, NULL,
1130 NULL, NULL, NULL, NULL,
1131 NULL, NULL, NULL, NULL,
1132 NULL, NULL, NULL, NULL,
1133 NULL, NULL, NULL, NULL,
1134 NULL, NULL, NULL, NULL,
1135 NULL, NULL, NULL, NULL,
1136 },
1137 .cpuid = {
1138 .eax = 0xd,
1139 .needs_ecx = true, .ecx = 1,
1140 .reg = R_EAX,
1141 },
1142 .tcg_features = TCG_XSAVE_FEATURES,
1143 },
1144 [FEAT_6_EAX] = {
1145 .type = CPUID_FEATURE_WORD,
1146 .feat_names = {
1147 NULL, NULL, "arat", NULL,
1148 NULL, NULL, NULL, NULL,
1149 NULL, NULL, NULL, NULL,
1150 NULL, NULL, NULL, NULL,
1151 NULL, NULL, NULL, NULL,
1152 NULL, NULL, NULL, NULL,
1153 NULL, NULL, NULL, NULL,
1154 NULL, NULL, NULL, NULL,
1155 },
1156 .cpuid = { .eax = 6, .reg = R_EAX, },
1157 .tcg_features = TCG_6_EAX_FEATURES,
1158 },
1159 [FEAT_XSAVE_COMP_LO] = {
1160 .type = CPUID_FEATURE_WORD,
1161 .cpuid = {
1162 .eax = 0xD,
1163 .needs_ecx = true, .ecx = 0,
1164 .reg = R_EAX,
1165 },
1166 .tcg_features = ~0U,
1167 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1168 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1169 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1170 XSTATE_PKRU_MASK,
1171 },
1172 [FEAT_XSAVE_COMP_HI] = {
1173 .type = CPUID_FEATURE_WORD,
1174 .cpuid = {
1175 .eax = 0xD,
1176 .needs_ecx = true, .ecx = 0,
1177 .reg = R_EDX,
1178 },
1179 .tcg_features = ~0U,
1180 },
1181 /*Below are MSR exposed features*/
1182 [FEAT_ARCH_CAPABILITIES] = {
1183 .type = MSR_FEATURE_WORD,
1184 .feat_names = {
1185 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1186 "ssb-no", NULL, NULL, NULL,
1187 NULL, NULL, NULL, NULL,
1188 NULL, NULL, NULL, NULL,
1189 NULL, NULL, NULL, NULL,
1190 NULL, NULL, NULL, NULL,
1191 NULL, NULL, NULL, NULL,
1192 NULL, NULL, NULL, NULL,
1193 },
1194 .msr = {
1195 .index = MSR_IA32_ARCH_CAPABILITIES,
1196 .cpuid_dep = {
1197 FEAT_7_0_EDX,
1198 CPUID_7_0_EDX_ARCH_CAPABILITIES
1199 }
1200 },
1201 },
1202 };
1203
/* Maps a 32-bit x86 register index (R_*) to its name and QAPI enum value */
typedef struct X86RegisterInfo32 {
    /* Canonical register name, e.g. "EAX" */
    const char *name;
    /* Corresponding X86CPURegister32 QAPI enum value for this register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
1210
/*
 * Table of 32-bit register names and QAPI enum values, indexed by the
 * R_* register index (see get_register_name_32()).
 */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
1224
/* Describes one XSAVE state component (one bit of the XSAVE state mask) */
typedef struct ExtSaveArea {
    /* CPUID feature word and bit(s) that advertise this component */
    uint32_t feature, bits;
    /* Byte offset and size of the component within the XSAVE area */
    uint32_t offset, size;
} ExtSaveArea;
1229
/*
 * XSAVE state components known to QEMU, indexed by their XSTATE_*_BIT
 * number.  Each entry records which CPUID feature enables the component
 * and where it lives inside X86XSaveArea (see xsave_area_size()).
 */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
1274
1275 static uint32_t xsave_area_size(uint64_t mask)
1276 {
1277 int i;
1278 uint64_t ret = 0;
1279
1280 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1281 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1282 if ((mask >> i) & 1) {
1283 ret = MAX(ret, esa->offset + esa->size);
1284 }
1285 }
1286 return ret;
1287 }
1288
/*
 * True when the active accelerator (KVM or HVF) exposes the host CPU's
 * CPUID data to the guest, rather than a fully emulated CPUID.
 */
static inline bool accel_uses_host_cpuid(void)
{
    if (kvm_enabled()) {
        return true;
    }
    return hvf_enabled();
}
1293
1294 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1295 {
1296 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1297 cpu->env.features[FEAT_XSAVE_COMP_LO];
1298 }
1299
1300 const char *get_register_name_32(unsigned int reg)
1301 {
1302 if (reg >= CPU_NB_REGS32) {
1303 return NULL;
1304 }
1305 return x86_reg_info_32[reg].name;
1306 }
1307
1308 /*
1309 * Returns the set of feature flags that are supported and migratable by
1310 * QEMU, for a given FeatureWord.
1311 */
1312 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1313 {
1314 FeatureWordInfo *wi = &feature_word_info[w];
1315 uint32_t r = 0;
1316 int i;
1317
1318 for (i = 0; i < 32; i++) {
1319 uint32_t f = 1U << i;
1320
1321 /* If the feature name is known, it is implicitly considered migratable,
1322 * unless it is explicitly set in unmigratable_flags */
1323 if ((wi->migratable_flags & f) ||
1324 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1325 r |= f;
1326 }
1327 }
1328 return r;
1329 }
1330
/*
 * Execute the CPUID instruction on the host with leaf @function and
 * subleaf @count, storing the resulting registers through *eax..*edx.
 * Any of the output pointers may be NULL to discard that register.
 * Aborts at runtime when built for a non-x86 host.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /*
     * NOTE(review): pusha/popa saves and restores all GPRs and the
     * results are stored through %esi instead of asm outputs —
     * presumably to avoid declaring %ebx (the i386 PIC register) as
     * an output/clobber; confirm before changing.
     */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    /* Each output pointer is optional */
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
1364
/*
 * Query the host CPU's vendor string, family, model and stepping using
 * CPUID leaves 0 and 1.  @vendor must have room for the 12-byte vendor
 * string plus terminator; @family, @model and @stepping may each be
 * NULL to skip that value.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    /* Leaf 0: vendor string in EBX:EDX:ECX order */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    /* Leaf 1 EAX: stepping[3:0], model[7:4], family[11:8],
     * ext model[19:16], ext family[27:20] */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        /* base family plus extended family (ext family reads 0 unless
         * base family is 0xF, so the unconditional add is safe) */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        /* base model in the low nibble, extended model above it */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        *stepping = eax & 0x0F;
    }
}
1383
1384 /* CPU class name definitions: */
1385
/* Return the QOM type name for a given CPU model name.
 * Caller is responsible for freeing the returned string with g_free().
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
1393
1394 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1395 {
1396 ObjectClass *oc;
1397 char *typename = x86_cpu_type_name(cpu_model);
1398 oc = object_class_by_name(typename);
1399 g_free(typename);
1400 return oc;
1401 }
1402
1403 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1404 {
1405 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1406 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1407 return g_strndup(class_name,
1408 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1409 }
1410
/* Definition of one built-in CPU model (see builtin_x86_defs[]) */
struct X86CPUDefinition {
    /* Model name, as used on the command line */
    const char *name;
    /* CPUID level (value reported in CPUID[0].EAX) */
    uint32_t level;
    /* Extended CPUID level (value reported in CPUID[0x80000000].EAX) */
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    /* Default feature bits for every feature word */
    FeatureWordArray features;
    /* Model-id string reported via CPUID[0x80000002..4] */
    const char *model_id;
    /* Optional cache topology; NULL selects the default cache info */
    CPUCaches *cache_info;
};
1424
/*
 * Cache topology of the AMD EPYC CPU model: 32 KiB 8-way L1D,
 * 64 KiB 4-way L1I, 512 KiB 8-way unified L2 and 8 MiB 16-way
 * unified L3, all with 64-byte lines.
 */
static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 8192,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};
1474
1475 static X86CPUDefinition builtin_x86_defs[] = {
1476 {
1477 .name = "qemu64",
1478 .level = 0xd,
1479 .vendor = CPUID_VENDOR_AMD,
1480 .family = 6,
1481 .model = 6,
1482 .stepping = 3,
1483 .features[FEAT_1_EDX] =
1484 PPRO_FEATURES |
1485 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1486 CPUID_PSE36,
1487 .features[FEAT_1_ECX] =
1488 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1489 .features[FEAT_8000_0001_EDX] =
1490 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1491 .features[FEAT_8000_0001_ECX] =
1492 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1493 .xlevel = 0x8000000A,
1494 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1495 },
1496 {
1497 .name = "phenom",
1498 .level = 5,
1499 .vendor = CPUID_VENDOR_AMD,
1500 .family = 16,
1501 .model = 2,
1502 .stepping = 3,
1503 /* Missing: CPUID_HT */
1504 .features[FEAT_1_EDX] =
1505 PPRO_FEATURES |
1506 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1507 CPUID_PSE36 | CPUID_VME,
1508 .features[FEAT_1_ECX] =
1509 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1510 CPUID_EXT_POPCNT,
1511 .features[FEAT_8000_0001_EDX] =
1512 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1513 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1514 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1515 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1516 CPUID_EXT3_CR8LEG,
1517 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1518 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1519 .features[FEAT_8000_0001_ECX] =
1520 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1521 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1522 /* Missing: CPUID_SVM_LBRV */
1523 .features[FEAT_SVM] =
1524 CPUID_SVM_NPT,
1525 .xlevel = 0x8000001A,
1526 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1527 },
1528 {
1529 .name = "core2duo",
1530 .level = 10,
1531 .vendor = CPUID_VENDOR_INTEL,
1532 .family = 6,
1533 .model = 15,
1534 .stepping = 11,
1535 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1536 .features[FEAT_1_EDX] =
1537 PPRO_FEATURES |
1538 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1539 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1540 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1541 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1542 .features[FEAT_1_ECX] =
1543 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1544 CPUID_EXT_CX16,
1545 .features[FEAT_8000_0001_EDX] =
1546 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1547 .features[FEAT_8000_0001_ECX] =
1548 CPUID_EXT3_LAHF_LM,
1549 .xlevel = 0x80000008,
1550 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1551 },
1552 {
1553 .name = "kvm64",
1554 .level = 0xd,
1555 .vendor = CPUID_VENDOR_INTEL,
1556 .family = 15,
1557 .model = 6,
1558 .stepping = 1,
1559 /* Missing: CPUID_HT */
1560 .features[FEAT_1_EDX] =
1561 PPRO_FEATURES | CPUID_VME |
1562 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1563 CPUID_PSE36,
1564 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1565 .features[FEAT_1_ECX] =
1566 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1567 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1568 .features[FEAT_8000_0001_EDX] =
1569 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1570 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1571 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1572 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1573 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1574 .features[FEAT_8000_0001_ECX] =
1575 0,
1576 .xlevel = 0x80000008,
1577 .model_id = "Common KVM processor"
1578 },
1579 {
1580 .name = "qemu32",
1581 .level = 4,
1582 .vendor = CPUID_VENDOR_INTEL,
1583 .family = 6,
1584 .model = 6,
1585 .stepping = 3,
1586 .features[FEAT_1_EDX] =
1587 PPRO_FEATURES,
1588 .features[FEAT_1_ECX] =
1589 CPUID_EXT_SSE3,
1590 .xlevel = 0x80000004,
1591 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1592 },
1593 {
1594 .name = "kvm32",
1595 .level = 5,
1596 .vendor = CPUID_VENDOR_INTEL,
1597 .family = 15,
1598 .model = 6,
1599 .stepping = 1,
1600 .features[FEAT_1_EDX] =
1601 PPRO_FEATURES | CPUID_VME |
1602 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1603 .features[FEAT_1_ECX] =
1604 CPUID_EXT_SSE3,
1605 .features[FEAT_8000_0001_ECX] =
1606 0,
1607 .xlevel = 0x80000008,
1608 .model_id = "Common 32-bit KVM processor"
1609 },
1610 {
1611 .name = "coreduo",
1612 .level = 10,
1613 .vendor = CPUID_VENDOR_INTEL,
1614 .family = 6,
1615 .model = 14,
1616 .stepping = 8,
1617 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1618 .features[FEAT_1_EDX] =
1619 PPRO_FEATURES | CPUID_VME |
1620 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1621 CPUID_SS,
1622 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1623 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1624 .features[FEAT_1_ECX] =
1625 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1626 .features[FEAT_8000_0001_EDX] =
1627 CPUID_EXT2_NX,
1628 .xlevel = 0x80000008,
1629 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1630 },
1631 {
1632 .name = "486",
1633 .level = 1,
1634 .vendor = CPUID_VENDOR_INTEL,
1635 .family = 4,
1636 .model = 8,
1637 .stepping = 0,
1638 .features[FEAT_1_EDX] =
1639 I486_FEATURES,
1640 .xlevel = 0,
1641 .model_id = "",
1642 },
1643 {
1644 .name = "pentium",
1645 .level = 1,
1646 .vendor = CPUID_VENDOR_INTEL,
1647 .family = 5,
1648 .model = 4,
1649 .stepping = 3,
1650 .features[FEAT_1_EDX] =
1651 PENTIUM_FEATURES,
1652 .xlevel = 0,
1653 .model_id = "",
1654 },
1655 {
1656 .name = "pentium2",
1657 .level = 2,
1658 .vendor = CPUID_VENDOR_INTEL,
1659 .family = 6,
1660 .model = 5,
1661 .stepping = 2,
1662 .features[FEAT_1_EDX] =
1663 PENTIUM2_FEATURES,
1664 .xlevel = 0,
1665 .model_id = "",
1666 },
1667 {
1668 .name = "pentium3",
1669 .level = 3,
1670 .vendor = CPUID_VENDOR_INTEL,
1671 .family = 6,
1672 .model = 7,
1673 .stepping = 3,
1674 .features[FEAT_1_EDX] =
1675 PENTIUM3_FEATURES,
1676 .xlevel = 0,
1677 .model_id = "",
1678 },
1679 {
1680 .name = "athlon",
1681 .level = 2,
1682 .vendor = CPUID_VENDOR_AMD,
1683 .family = 6,
1684 .model = 2,
1685 .stepping = 3,
1686 .features[FEAT_1_EDX] =
1687 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1688 CPUID_MCA,
1689 .features[FEAT_8000_0001_EDX] =
1690 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1691 .xlevel = 0x80000008,
1692 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1693 },
1694 {
1695 .name = "n270",
1696 .level = 10,
1697 .vendor = CPUID_VENDOR_INTEL,
1698 .family = 6,
1699 .model = 28,
1700 .stepping = 2,
1701 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1702 .features[FEAT_1_EDX] =
1703 PPRO_FEATURES |
1704 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1705 CPUID_ACPI | CPUID_SS,
1706 /* Some CPUs got no CPUID_SEP */
1707 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1708 * CPUID_EXT_XTPR */
1709 .features[FEAT_1_ECX] =
1710 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1711 CPUID_EXT_MOVBE,
1712 .features[FEAT_8000_0001_EDX] =
1713 CPUID_EXT2_NX,
1714 .features[FEAT_8000_0001_ECX] =
1715 CPUID_EXT3_LAHF_LM,
1716 .xlevel = 0x80000008,
1717 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1718 },
1719 {
1720 .name = "Conroe",
1721 .level = 10,
1722 .vendor = CPUID_VENDOR_INTEL,
1723 .family = 6,
1724 .model = 15,
1725 .stepping = 3,
1726 .features[FEAT_1_EDX] =
1727 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1728 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1729 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1730 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1731 CPUID_DE | CPUID_FP87,
1732 .features[FEAT_1_ECX] =
1733 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1734 .features[FEAT_8000_0001_EDX] =
1735 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1736 .features[FEAT_8000_0001_ECX] =
1737 CPUID_EXT3_LAHF_LM,
1738 .xlevel = 0x80000008,
1739 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1740 },
1741 {
1742 .name = "Penryn",
1743 .level = 10,
1744 .vendor = CPUID_VENDOR_INTEL,
1745 .family = 6,
1746 .model = 23,
1747 .stepping = 3,
1748 .features[FEAT_1_EDX] =
1749 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1750 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1751 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1752 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1753 CPUID_DE | CPUID_FP87,
1754 .features[FEAT_1_ECX] =
1755 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1756 CPUID_EXT_SSE3,
1757 .features[FEAT_8000_0001_EDX] =
1758 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1759 .features[FEAT_8000_0001_ECX] =
1760 CPUID_EXT3_LAHF_LM,
1761 .xlevel = 0x80000008,
1762 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1763 },
1764 {
1765 .name = "Nehalem",
1766 .level = 11,
1767 .vendor = CPUID_VENDOR_INTEL,
1768 .family = 6,
1769 .model = 26,
1770 .stepping = 3,
1771 .features[FEAT_1_EDX] =
1772 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1773 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1774 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1775 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1776 CPUID_DE | CPUID_FP87,
1777 .features[FEAT_1_ECX] =
1778 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1779 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1780 .features[FEAT_8000_0001_EDX] =
1781 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1782 .features[FEAT_8000_0001_ECX] =
1783 CPUID_EXT3_LAHF_LM,
1784 .xlevel = 0x80000008,
1785 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1786 },
1787 {
1788 .name = "Nehalem-IBRS",
1789 .level = 11,
1790 .vendor = CPUID_VENDOR_INTEL,
1791 .family = 6,
1792 .model = 26,
1793 .stepping = 3,
1794 .features[FEAT_1_EDX] =
1795 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1796 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1797 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1798 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1799 CPUID_DE | CPUID_FP87,
1800 .features[FEAT_1_ECX] =
1801 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1802 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1803 .features[FEAT_7_0_EDX] =
1804 CPUID_7_0_EDX_SPEC_CTRL,
1805 .features[FEAT_8000_0001_EDX] =
1806 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1807 .features[FEAT_8000_0001_ECX] =
1808 CPUID_EXT3_LAHF_LM,
1809 .xlevel = 0x80000008,
1810 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1811 },
1812 {
1813 .name = "Westmere",
1814 .level = 11,
1815 .vendor = CPUID_VENDOR_INTEL,
1816 .family = 6,
1817 .model = 44,
1818 .stepping = 1,
1819 .features[FEAT_1_EDX] =
1820 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1821 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1822 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1823 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1824 CPUID_DE | CPUID_FP87,
1825 .features[FEAT_1_ECX] =
1826 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1827 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1828 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1829 .features[FEAT_8000_0001_EDX] =
1830 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1831 .features[FEAT_8000_0001_ECX] =
1832 CPUID_EXT3_LAHF_LM,
1833 .features[FEAT_6_EAX] =
1834 CPUID_6_EAX_ARAT,
1835 .xlevel = 0x80000008,
1836 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1837 },
1838 {
1839 .name = "Westmere-IBRS",
1840 .level = 11,
1841 .vendor = CPUID_VENDOR_INTEL,
1842 .family = 6,
1843 .model = 44,
1844 .stepping = 1,
1845 .features[FEAT_1_EDX] =
1846 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1847 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1848 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1849 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1850 CPUID_DE | CPUID_FP87,
1851 .features[FEAT_1_ECX] =
1852 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1853 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1854 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1855 .features[FEAT_8000_0001_EDX] =
1856 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1857 .features[FEAT_8000_0001_ECX] =
1858 CPUID_EXT3_LAHF_LM,
1859 .features[FEAT_7_0_EDX] =
1860 CPUID_7_0_EDX_SPEC_CTRL,
1861 .features[FEAT_6_EAX] =
1862 CPUID_6_EAX_ARAT,
1863 .xlevel = 0x80000008,
1864 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1865 },
/*
 * SandyBridge: Intel 2nd-generation Core (family 6, model 42, stepping 1).
 * First built-in model in this family group to expose AVX, XSAVE and AES;
 * carries no FEAT_7_0_EBX flags and no TSX (HLE/RTM).
 */
1866 {
1867 .name = "SandyBridge",
1868 .level = 0xd,
1869 .vendor = CPUID_VENDOR_INTEL,
1870 .family = 6,
1871 .model = 42,
1872 .stepping = 1,
1873 .features[FEAT_1_EDX] =
1874 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1875 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1876 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1877 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1878 CPUID_DE | CPUID_FP87,
1879 .features[FEAT_1_ECX] =
1880 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1881 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1882 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1883 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1884 CPUID_EXT_SSE3,
1885 .features[FEAT_8000_0001_EDX] =
1886 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1887 CPUID_EXT2_SYSCALL,
1888 .features[FEAT_8000_0001_ECX] =
1889 CPUID_EXT3_LAHF_LM,
1890 .features[FEAT_XSAVE] =
1891 CPUID_XSAVE_XSAVEOPT,
1892 .features[FEAT_6_EAX] =
1893 CPUID_6_EAX_ARAT,
1894 .xlevel = 0x80000008,
1895 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1896 },
/*
 * SandyBridge-IBRS: byte-for-byte identical to SandyBridge above except
 * for the added CPUID_7_0_EDX_SPEC_CTRL bit (IBRS/IBPB speculation
 * control, the post-Spectre microcode-update variant) and the model_id.
 */
1897 {
1898 .name = "SandyBridge-IBRS",
1899 .level = 0xd,
1900 .vendor = CPUID_VENDOR_INTEL,
1901 .family = 6,
1902 .model = 42,
1903 .stepping = 1,
1904 .features[FEAT_1_EDX] =
1905 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1906 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1907 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1908 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1909 CPUID_DE | CPUID_FP87,
1910 .features[FEAT_1_ECX] =
1911 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1912 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1913 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1914 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1915 CPUID_EXT_SSE3,
1916 .features[FEAT_8000_0001_EDX] =
1917 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1918 CPUID_EXT2_SYSCALL,
1919 .features[FEAT_8000_0001_ECX] =
1920 CPUID_EXT3_LAHF_LM,
1921 .features[FEAT_7_0_EDX] =
1922 CPUID_7_0_EDX_SPEC_CTRL,
1923 .features[FEAT_XSAVE] =
1924 CPUID_XSAVE_XSAVEOPT,
1925 .features[FEAT_6_EAX] =
1926 CPUID_6_EAX_ARAT,
1927 .xlevel = 0x80000008,
1928 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1929 },
/*
 * IvyBridge (family 6, model 58): relative to SandyBridge this adds
 * F16C and RDRAND in CPUID.1:ECX plus the first FEAT_7_0_EBX flags
 * (FSGSBASE, SMEP, ERMS). Still no TSX.
 */
1930 {
1931 .name = "IvyBridge",
1932 .level = 0xd,
1933 .vendor = CPUID_VENDOR_INTEL,
1934 .family = 6,
1935 .model = 58,
1936 .stepping = 9,
1937 .features[FEAT_1_EDX] =
1938 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1939 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1940 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1941 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1942 CPUID_DE | CPUID_FP87,
1943 .features[FEAT_1_ECX] =
1944 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1945 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1946 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1947 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1948 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1949 .features[FEAT_7_0_EBX] =
1950 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1951 CPUID_7_0_EBX_ERMS,
1952 .features[FEAT_8000_0001_EDX] =
1953 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1954 CPUID_EXT2_SYSCALL,
1955 .features[FEAT_8000_0001_ECX] =
1956 CPUID_EXT3_LAHF_LM,
1957 .features[FEAT_XSAVE] =
1958 CPUID_XSAVE_XSAVEOPT,
1959 .features[FEAT_6_EAX] =
1960 CPUID_6_EAX_ARAT,
1961 .xlevel = 0x80000008,
1962 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1963 },
/*
 * IvyBridge-IBRS: IvyBridge plus CPUID_7_0_EDX_SPEC_CTRL (IBRS/IBPB);
 * all other feature words are identical to the IvyBridge entry above.
 */
1964 {
1965 .name = "IvyBridge-IBRS",
1966 .level = 0xd,
1967 .vendor = CPUID_VENDOR_INTEL,
1968 .family = 6,
1969 .model = 58,
1970 .stepping = 9,
1971 .features[FEAT_1_EDX] =
1972 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1973 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1974 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1975 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1976 CPUID_DE | CPUID_FP87,
1977 .features[FEAT_1_ECX] =
1978 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1979 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1980 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1981 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1982 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1983 .features[FEAT_7_0_EBX] =
1984 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1985 CPUID_7_0_EBX_ERMS,
1986 .features[FEAT_8000_0001_EDX] =
1987 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1988 CPUID_EXT2_SYSCALL,
1989 .features[FEAT_8000_0001_ECX] =
1990 CPUID_EXT3_LAHF_LM,
1991 .features[FEAT_7_0_EDX] =
1992 CPUID_7_0_EDX_SPEC_CTRL,
1993 .features[FEAT_XSAVE] =
1994 CPUID_XSAVE_XSAVEOPT,
1995 .features[FEAT_6_EAX] =
1996 CPUID_6_EAX_ARAT,
1997 .xlevel = 0x80000008,
1998 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1999 },
/*
 * Haswell-noTSX (family 6, model 60, stepping 1): the Haswell model
 * with the TSX bits (HLE/RTM) removed from FEAT_7_0_EBX, for hosts
 * whose microcode disabled TSX. Adds FMA, MOVBE, PCID, ABM, BMI1/2,
 * AVX2 and INVPCID relative to IvyBridge.
 */
2000 {
2001 .name = "Haswell-noTSX",
2002 .level = 0xd,
2003 .vendor = CPUID_VENDOR_INTEL,
2004 .family = 6,
2005 .model = 60,
2006 .stepping = 1,
2007 .features[FEAT_1_EDX] =
2008 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2009 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2010 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2011 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2012 CPUID_DE | CPUID_FP87,
2013 .features[FEAT_1_ECX] =
2014 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2015 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2016 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2017 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2018 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2019 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2020 .features[FEAT_8000_0001_EDX] =
2021 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2022 CPUID_EXT2_SYSCALL,
2023 .features[FEAT_8000_0001_ECX] =
2024 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2025 .features[FEAT_7_0_EBX] =
2026 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2027 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2028 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
2029 .features[FEAT_XSAVE] =
2030 CPUID_XSAVE_XSAVEOPT,
2031 .features[FEAT_6_EAX] =
2032 CPUID_6_EAX_ARAT,
2033 .xlevel = 0x80000008,
2034 .model_id = "Intel Core Processor (Haswell, no TSX)",
2035 },
/*
 * Haswell-noTSX-IBRS: Haswell-noTSX plus CPUID_7_0_EDX_SPEC_CTRL
 * (IBRS/IBPB); otherwise identical to the entry above.
 */
2036 {
2037 .name = "Haswell-noTSX-IBRS",
2038 .level = 0xd,
2039 .vendor = CPUID_VENDOR_INTEL,
2040 .family = 6,
2041 .model = 60,
2042 .stepping = 1,
2043 .features[FEAT_1_EDX] =
2044 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2045 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2046 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2047 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2048 CPUID_DE | CPUID_FP87,
2049 .features[FEAT_1_ECX] =
2050 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2051 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2052 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2053 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2054 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2055 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2056 .features[FEAT_8000_0001_EDX] =
2057 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2058 CPUID_EXT2_SYSCALL,
2059 .features[FEAT_8000_0001_ECX] =
2060 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2061 .features[FEAT_7_0_EDX] =
2062 CPUID_7_0_EDX_SPEC_CTRL,
2063 .features[FEAT_7_0_EBX] =
2064 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2065 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2066 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
2067 .features[FEAT_XSAVE] =
2068 CPUID_XSAVE_XSAVEOPT,
2069 .features[FEAT_6_EAX] =
2070 CPUID_6_EAX_ARAT,
2071 .xlevel = 0x80000008,
2072 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
2073 },
/*
 * Haswell (stepping 4): the full model including the TSX bits
 * (CPUID_7_0_EBX_HLE / CPUID_7_0_EBX_RTM); otherwise matches
 * Haswell-noTSX except for the stepping.
 */
2074 {
2075 .name = "Haswell",
2076 .level = 0xd,
2077 .vendor = CPUID_VENDOR_INTEL,
2078 .family = 6,
2079 .model = 60,
2080 .stepping = 4,
2081 .features[FEAT_1_EDX] =
2082 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2083 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2084 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2085 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2086 CPUID_DE | CPUID_FP87,
2087 .features[FEAT_1_ECX] =
2088 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2089 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2090 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2091 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2092 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2093 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2094 .features[FEAT_8000_0001_EDX] =
2095 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2096 CPUID_EXT2_SYSCALL,
2097 .features[FEAT_8000_0001_ECX] =
2098 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2099 .features[FEAT_7_0_EBX] =
2100 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2101 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2102 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2103 CPUID_7_0_EBX_RTM,
2104 .features[FEAT_XSAVE] =
2105 CPUID_XSAVE_XSAVEOPT,
2106 .features[FEAT_6_EAX] =
2107 CPUID_6_EAX_ARAT,
2108 .xlevel = 0x80000008,
2109 .model_id = "Intel Core Processor (Haswell)",
2110 },
/*
 * Haswell-IBRS: Haswell plus CPUID_7_0_EDX_SPEC_CTRL (IBRS/IBPB);
 * otherwise identical to the Haswell entry above.
 */
2111 {
2112 .name = "Haswell-IBRS",
2113 .level = 0xd,
2114 .vendor = CPUID_VENDOR_INTEL,
2115 .family = 6,
2116 .model = 60,
2117 .stepping = 4,
2118 .features[FEAT_1_EDX] =
2119 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2120 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2121 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2122 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2123 CPUID_DE | CPUID_FP87,
2124 .features[FEAT_1_ECX] =
2125 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2126 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2127 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2128 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2129 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2130 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2131 .features[FEAT_8000_0001_EDX] =
2132 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2133 CPUID_EXT2_SYSCALL,
2134 .features[FEAT_8000_0001_ECX] =
2135 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2136 .features[FEAT_7_0_EDX] =
2137 CPUID_7_0_EDX_SPEC_CTRL,
2138 .features[FEAT_7_0_EBX] =
2139 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2140 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2141 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2142 CPUID_7_0_EBX_RTM,
2143 .features[FEAT_XSAVE] =
2144 CPUID_XSAVE_XSAVEOPT,
2145 .features[FEAT_6_EAX] =
2146 CPUID_6_EAX_ARAT,
2147 .xlevel = 0x80000008,
2148 .model_id = "Intel Core Processor (Haswell, IBRS)",
2149 },
/*
 * Broadwell-noTSX (family 6, model 61): Broadwell without the TSX bits
 * (HLE/RTM). Relative to Haswell this adds RDSEED, ADX and SMAP in
 * FEAT_7_0_EBX and 3DNOWPREFETCH in CPUID.80000001H:ECX.
 */
2150 {
2151 .name = "Broadwell-noTSX",
2152 .level = 0xd,
2153 .vendor = CPUID_VENDOR_INTEL,
2154 .family = 6,
2155 .model = 61,
2156 .stepping = 2,
2157 .features[FEAT_1_EDX] =
2158 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2159 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2160 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2161 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2162 CPUID_DE | CPUID_FP87,
2163 .features[FEAT_1_ECX] =
2164 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2165 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2166 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2167 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2168 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2169 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2170 .features[FEAT_8000_0001_EDX] =
2171 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2172 CPUID_EXT2_SYSCALL,
2173 .features[FEAT_8000_0001_ECX] =
2174 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2175 .features[FEAT_7_0_EBX] =
2176 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2177 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2178 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2179 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2180 CPUID_7_0_EBX_SMAP,
2181 .features[FEAT_XSAVE] =
2182 CPUID_XSAVE_XSAVEOPT,
2183 .features[FEAT_6_EAX] =
2184 CPUID_6_EAX_ARAT,
2185 .xlevel = 0x80000008,
2186 .model_id = "Intel Core Processor (Broadwell, no TSX)",
2187 },
/*
 * Broadwell-noTSX-IBRS: Broadwell-noTSX plus CPUID_7_0_EDX_SPEC_CTRL
 * (IBRS/IBPB); otherwise identical to the entry above.
 */
2188 {
2189 .name = "Broadwell-noTSX-IBRS",
2190 .level = 0xd,
2191 .vendor = CPUID_VENDOR_INTEL,
2192 .family = 6,
2193 .model = 61,
2194 .stepping = 2,
2195 .features[FEAT_1_EDX] =
2196 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2197 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2198 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2199 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2200 CPUID_DE | CPUID_FP87,
2201 .features[FEAT_1_ECX] =
2202 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2203 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2204 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2205 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2206 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2207 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2208 .features[FEAT_8000_0001_EDX] =
2209 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2210 CPUID_EXT2_SYSCALL,
2211 .features[FEAT_8000_0001_ECX] =
2212 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2213 .features[FEAT_7_0_EDX] =
2214 CPUID_7_0_EDX_SPEC_CTRL,
2215 .features[FEAT_7_0_EBX] =
2216 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2217 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2218 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2219 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2220 CPUID_7_0_EBX_SMAP,
2221 .features[FEAT_XSAVE] =
2222 CPUID_XSAVE_XSAVEOPT,
2223 .features[FEAT_6_EAX] =
2224 CPUID_6_EAX_ARAT,
2225 .xlevel = 0x80000008,
2226 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
2227 },
/*
 * Broadwell: the full model including the TSX bits (HLE/RTM) in
 * FEAT_7_0_EBX; otherwise matches Broadwell-noTSX.
 */
2228 {
2229 .name = "Broadwell",
2230 .level = 0xd,
2231 .vendor = CPUID_VENDOR_INTEL,
2232 .family = 6,
2233 .model = 61,
2234 .stepping = 2,
2235 .features[FEAT_1_EDX] =
2236 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2237 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2238 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2239 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2240 CPUID_DE | CPUID_FP87,
2241 .features[FEAT_1_ECX] =
2242 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2243 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2244 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2245 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2246 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2247 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2248 .features[FEAT_8000_0001_EDX] =
2249 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2250 CPUID_EXT2_SYSCALL,
2251 .features[FEAT_8000_0001_ECX] =
2252 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2253 .features[FEAT_7_0_EBX] =
2254 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2255 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2256 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2257 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2258 CPUID_7_0_EBX_SMAP,
2259 .features[FEAT_XSAVE] =
2260 CPUID_XSAVE_XSAVEOPT,
2261 .features[FEAT_6_EAX] =
2262 CPUID_6_EAX_ARAT,
2263 .xlevel = 0x80000008,
2264 .model_id = "Intel Core Processor (Broadwell)",
2265 },
/*
 * Broadwell-IBRS: Broadwell plus CPUID_7_0_EDX_SPEC_CTRL (IBRS/IBPB);
 * otherwise identical to the Broadwell entry above.
 */
2266 {
2267 .name = "Broadwell-IBRS",
2268 .level = 0xd,
2269 .vendor = CPUID_VENDOR_INTEL,
2270 .family = 6,
2271 .model = 61,
2272 .stepping = 2,
2273 .features[FEAT_1_EDX] =
2274 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2275 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2276 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2277 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2278 CPUID_DE | CPUID_FP87,
2279 .features[FEAT_1_ECX] =
2280 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2281 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2282 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2283 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2284 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2285 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2286 .features[FEAT_8000_0001_EDX] =
2287 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2288 CPUID_EXT2_SYSCALL,
2289 .features[FEAT_8000_0001_ECX] =
2290 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2291 .features[FEAT_7_0_EDX] =
2292 CPUID_7_0_EDX_SPEC_CTRL,
2293 .features[FEAT_7_0_EBX] =
2294 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2295 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2296 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2297 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2298 CPUID_7_0_EBX_SMAP,
2299 .features[FEAT_XSAVE] =
2300 CPUID_XSAVE_XSAVEOPT,
2301 .features[FEAT_6_EAX] =
2302 CPUID_6_EAX_ARAT,
2303 .xlevel = 0x80000008,
2304 .model_id = "Intel Core Processor (Broadwell, IBRS)",
2305 },
/*
 * Skylake-Client (family 6, model 94): first model in this group to
 * extend FEAT_XSAVE beyond XSAVEOPT (adds XSAVEC and XGETBV1). The
 * FEAT_7_0_EBX set matches Broadwell (including HLE/RTM).
 */
2306 {
2307 .name = "Skylake-Client",
2308 .level = 0xd,
2309 .vendor = CPUID_VENDOR_INTEL,
2310 .family = 6,
2311 .model = 94,
2312 .stepping = 3,
2313 .features[FEAT_1_EDX] =
2314 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2315 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2316 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2317 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2318 CPUID_DE | CPUID_FP87,
2319 .features[FEAT_1_ECX] =
2320 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2321 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2322 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2323 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2324 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2325 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2326 .features[FEAT_8000_0001_EDX] =
2327 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2328 CPUID_EXT2_SYSCALL,
2329 .features[FEAT_8000_0001_ECX] =
2330 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2331 .features[FEAT_7_0_EBX] =
2332 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2333 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2334 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2335 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2336 CPUID_7_0_EBX_SMAP,
2337 /* Missing: XSAVES (not supported by some Linux versions,
2338 * including v4.1 to v4.12).
2339 * KVM doesn't yet expose any XSAVES state save component,
2340 * and the only one defined in Skylake (processor tracing)
2341 * probably will block migration anyway.
2342 */
2343 .features[FEAT_XSAVE] =
2344 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2345 CPUID_XSAVE_XGETBV1,
2346 .features[FEAT_6_EAX] =
2347 CPUID_6_EAX_ARAT,
2348 .xlevel = 0x80000008,
2349 .model_id = "Intel Core Processor (Skylake)",
2350 },
/*
 * Skylake-Client-IBRS: Skylake-Client plus CPUID_7_0_EDX_SPEC_CTRL
 * (IBRS/IBPB); otherwise identical to the entry above.
 */
2351 {
2352 .name = "Skylake-Client-IBRS",
2353 .level = 0xd,
2354 .vendor = CPUID_VENDOR_INTEL,
2355 .family = 6,
2356 .model = 94,
2357 .stepping = 3,
2358 .features[FEAT_1_EDX] =
2359 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2360 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2361 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2362 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2363 CPUID_DE | CPUID_FP87,
2364 .features[FEAT_1_ECX] =
2365 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2366 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2367 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2368 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2369 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2370 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2371 .features[FEAT_8000_0001_EDX] =
2372 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2373 CPUID_EXT2_SYSCALL,
2374 .features[FEAT_8000_0001_ECX] =
2375 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2376 .features[FEAT_7_0_EDX] =
2377 CPUID_7_0_EDX_SPEC_CTRL,
2378 .features[FEAT_7_0_EBX] =
2379 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2380 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2381 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2382 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2383 CPUID_7_0_EBX_SMAP,
2384 /* Missing: XSAVES (not supported by some Linux versions,
2385 * including v4.1 to v4.12).
2386 * KVM doesn't yet expose any XSAVES state save component,
2387 * and the only one defined in Skylake (processor tracing)
2388 * probably will block migration anyway.
2389 */
2390 .features[FEAT_XSAVE] =
2391 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2392 CPUID_XSAVE_XGETBV1,
2393 .features[FEAT_6_EAX] =
2394 CPUID_6_EAX_ARAT,
2395 .xlevel = 0x80000008,
2396 .model_id = "Intel Core Processor (Skylake, IBRS)",
2397 },
/*
 * Skylake-Server (family 6, model 85): server variant adding the
 * AVX-512 foundation set (F/DQ/BW/CD/VL), CLWB, CLFLUSHOPT, PKU and
 * 1GB pages (PDPE1GB) relative to Skylake-Client.
 */
2398 {
2399 .name = "Skylake-Server",
2400 .level = 0xd,
2401 .vendor = CPUID_VENDOR_INTEL,
2402 .family = 6,
2403 .model = 85,
2404 .stepping = 4,
2405 .features[FEAT_1_EDX] =
2406 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2407 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2408 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2409 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2410 CPUID_DE | CPUID_FP87,
2411 .features[FEAT_1_ECX] =
2412 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2413 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2414 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2415 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2416 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2417 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2418 .features[FEAT_8000_0001_EDX] =
2419 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2420 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2421 .features[FEAT_8000_0001_ECX] =
2422 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2423 .features[FEAT_7_0_EBX] =
2424 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2425 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2426 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2427 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2428 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2429 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2430 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2431 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2432 .features[FEAT_7_0_ECX] =
2433 CPUID_7_0_ECX_PKU,
2434 /* Missing: XSAVES (not supported by some Linux versions,
2435 * including v4.1 to v4.12).
2436 * KVM doesn't yet expose any XSAVES state save component,
2437 * and the only one defined in Skylake (processor tracing)
2438 * probably will block migration anyway.
2439 */
2440 .features[FEAT_XSAVE] =
2441 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2442 CPUID_XSAVE_XGETBV1,
2443 .features[FEAT_6_EAX] =
2444 CPUID_6_EAX_ARAT,
2445 .xlevel = 0x80000008,
2446 .model_id = "Intel Xeon Processor (Skylake)",
2447 },
/*
 * Skylake-Server-IBRS: Skylake-Server plus CPUID_7_0_EDX_SPEC_CTRL
 * (IBRS/IBPB).
 */
2448 {
2449 .name = "Skylake-Server-IBRS",
2450 .level = 0xd,
2451 .vendor = CPUID_VENDOR_INTEL,
2452 .family = 6,
2453 .model = 85,
2454 .stepping = 4,
2455 .features[FEAT_1_EDX] =
2456 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2457 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2458 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2459 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2460 CPUID_DE | CPUID_FP87,
2461 .features[FEAT_1_ECX] =
2462 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2463 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2464 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2465 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2466 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2467 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2468 .features[FEAT_8000_0001_EDX] =
2469 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2470 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2471 .features[FEAT_8000_0001_ECX] =
2472 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2473 .features[FEAT_7_0_EDX] =
2474 CPUID_7_0_EDX_SPEC_CTRL,
/*
 * NOTE(review): unlike the base Skylake-Server entry, this variant does
 * NOT include CPUID_7_0_EBX_CLFLUSHOPT -- confirm whether this omission
 * is intentional. Adding it now would change the guest-visible CPUID of
 * existing VMs, so it is only flagged here, not changed.
 */
2475 .features[FEAT_7_0_EBX] =
2476 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2477 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2478 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2479 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2480 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2481 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2482 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2483 CPUID_7_0_EBX_AVX512VL,
2484 .features[FEAT_7_0_ECX] =
2485 CPUID_7_0_ECX_PKU,
2486 /* Missing: XSAVES (not supported by some Linux versions,
2487 * including v4.1 to v4.12).
2488 * KVM doesn't yet expose any XSAVES state save component,
2489 * and the only one defined in Skylake (processor tracing)
2490 * probably will block migration anyway.
2491 */
2492 .features[FEAT_XSAVE] =
2493 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2494 CPUID_XSAVE_XGETBV1,
2495 .features[FEAT_6_EAX] =
2496 CPUID_6_EAX_ARAT,
2497 .xlevel = 0x80000008,
2498 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2499 },
/*
 * Cascadelake-Server (family 6, model 85, stepping 6): Skylake-Server
 * derivative adding AVX512_VNNI plus the SPEC_CTRL and SSBD speculation
 * control bits in FEAT_7_0_EDX.
 *
 * Fix: CPUID_7_0_ECX_OSPKE removed. OSPKE (CPUID.07H:ECX bit 4) merely
 * reflects the guest's CR4.PKE setting at runtime; it is not a host
 * capability and can never be satisfied when the VM starts, so keeping
 * it in a static model definition only produces spurious
 * "feature not available" warnings. PKU (the actual hardware feature)
 * is retained.
 */
2500 {
2501 .name = "Cascadelake-Server",
2502 .level = 0xd,
2503 .vendor = CPUID_VENDOR_INTEL,
2504 .family = 6,
2505 .model = 85,
2506 .stepping = 6,
2507 .features[FEAT_1_EDX] =
2508 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2509 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2510 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2511 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2512 CPUID_DE | CPUID_FP87,
2513 .features[FEAT_1_ECX] =
2514 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2515 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2516 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2517 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2518 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2519 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2520 .features[FEAT_8000_0001_EDX] =
2521 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2522 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2523 .features[FEAT_8000_0001_ECX] =
2524 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2525 .features[FEAT_7_0_EBX] =
2526 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2527 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2528 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2529 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2530 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2531 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2532 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2533 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2534 .features[FEAT_7_0_ECX] =
2535 CPUID_7_0_ECX_PKU |
2536 CPUID_7_0_ECX_AVX512VNNI,
2537 .features[FEAT_7_0_EDX] =
2538 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2539 /* Missing: XSAVES (not supported by some Linux versions,
2540 * including v4.1 to v4.12).
2541 * KVM doesn't yet expose any XSAVES state save component,
2542 * and the only one defined in Skylake (processor tracing)
2543 * probably will block migration anyway.
2544 */
2545 .features[FEAT_XSAVE] =
2546 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2547 CPUID_XSAVE_XGETBV1,
2548 .features[FEAT_6_EAX] =
2549 CPUID_6_EAX_ARAT,
2550 .xlevel = 0x80000008,
2551 .model_id = "Intel Xeon Processor (Cascadelake)",
2552 },
/*
 * Icelake-Client (family 6, model 126): client Icelake model adding the
 * VBMI/VBMI2/GFNI/VAES/VPCLMULQDQ/AVX512_VNNI/BITALG/VPOPCNTDQ group in
 * FEAT_7_0_ECX, UMIP, and WBNOINVD in CPUID.80000008H:EBX.
 *
 * Fix: CPUID_7_0_ECX_OSPKE removed. OSPKE (CPUID.07H:ECX bit 4) merely
 * reflects the guest's CR4.PKE setting at runtime; it is not a host
 * capability and can never be satisfied when the VM starts, so keeping
 * it in a static model definition only produces spurious
 * "feature not available" warnings. PKU is retained.
 */
2553 {
2554 .name = "Icelake-Client",
2555 .level = 0xd,
2556 .vendor = CPUID_VENDOR_INTEL,
2557 .family = 6,
2558 .model = 126,
2559 .stepping = 0,
2560 .features[FEAT_1_EDX] =
2561 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2562 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2563 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2564 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2565 CPUID_DE | CPUID_FP87,
2566 .features[FEAT_1_ECX] =
2567 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2568 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2569 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2570 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2571 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2572 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2573 .features[FEAT_8000_0001_EDX] =
2574 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2575 CPUID_EXT2_SYSCALL,
2576 .features[FEAT_8000_0001_ECX] =
2577 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2578 .features[FEAT_8000_0008_EBX] =
2579 CPUID_8000_0008_EBX_WBNOINVD,
2580 .features[FEAT_7_0_EBX] =
2581 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2582 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2583 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2584 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2585 CPUID_7_0_EBX_SMAP,
2586 .features[FEAT_7_0_ECX] =
2587 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2588 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2589 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2590 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2591 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2592 .features[FEAT_7_0_EDX] =
2593 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2594 /* Missing: XSAVES (not supported by some Linux versions,
2595 * including v4.1 to v4.12).
2596 * KVM doesn't yet expose any XSAVES state save component,
2597 * and the only one defined in Skylake (processor tracing)
2598 * probably will block migration anyway.
2599 */
2600 .features[FEAT_XSAVE] =
2601 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2602 CPUID_XSAVE_XGETBV1,
2603 .features[FEAT_6_EAX] =
2604 CPUID_6_EAX_ARAT,
2605 .xlevel = 0x80000008,
2606 .model_id = "Intel Core Processor (Icelake)",
2607 },
/*
 * Icelake-Server (family 6, model 134): server Icelake model; like
 * Icelake-Client but with the AVX-512 foundation set, CLWB/CLFLUSHOPT,
 * PDPE1GB and LA57 (5-level paging) added.
 *
 * Fix: CPUID_7_0_ECX_OSPKE removed. OSPKE (CPUID.07H:ECX bit 4) merely
 * reflects the guest's CR4.PKE setting at runtime; it is not a host
 * capability and can never be satisfied when the VM starts, so keeping
 * it in a static model definition only produces spurious
 * "feature not available" warnings. PKU is retained.
 */
2608 {
2609 .name = "Icelake-Server",
2610 .level = 0xd,
2611 .vendor = CPUID_VENDOR_INTEL,
2612 .family = 6,
2613 .model = 134,
2614 .stepping = 0,
2615 .features[FEAT_1_EDX] =
2616 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2617 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2618 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2619 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2620 CPUID_DE | CPUID_FP87,
2621 .features[FEAT_1_ECX] =
2622 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2623 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2624 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2625 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2626 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2627 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2628 .features[FEAT_8000_0001_EDX] =
2629 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2630 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2631 .features[FEAT_8000_0001_ECX] =
2632 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2633 .features[FEAT_8000_0008_EBX] =
2634 CPUID_8000_0008_EBX_WBNOINVD,
2635 .features[FEAT_7_0_EBX] =
2636 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2637 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2638 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2639 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2640 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2641 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2642 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2643 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2644 .features[FEAT_7_0_ECX] =
2645 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2646 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2647 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2648 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2649 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
2650 .features[FEAT_7_0_EDX] =
2651 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2652 /* Missing: XSAVES (not supported by some Linux versions,
2653 * including v4.1 to v4.12).
2654 * KVM doesn't yet expose any XSAVES state save component,
2655 * and the only one defined in Skylake (processor tracing)
2656 * probably will block migration anyway.
2657 */
2658 .features[FEAT_XSAVE] =
2659 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2660 CPUID_XSAVE_XGETBV1,
2661 .features[FEAT_6_EAX] =
2662 CPUID_6_EAX_ARAT,
2663 .xlevel = 0x80000008,
2664 .model_id = "Intel Xeon Processor (Icelake)",
2665 },
/*
 * KnightsMill (family 6, model 133): Xeon Phi many-core model. Exposes
 * the Phi-specific AVX-512 extensions (ER/PF in FEAT_7_0_EBX,
 * 4VNNIW/4FMAPS in FEAT_7_0_EDX, VPOPCNTDQ in FEAT_7_0_ECX). Note this
 * is the only model in this group that sets CPUID_SS in CPUID.1:EDX,
 * and it has no PCID and no TSX.
 */
2666 {
2667 .name = "KnightsMill",
2668 .level = 0xd,
2669 .vendor = CPUID_VENDOR_INTEL,
2670 .family = 6,
2671 .model = 133,
2672 .stepping = 0,
2673 .features[FEAT_1_EDX] =
2674 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2675 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2676 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2677 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2678 CPUID_PSE | CPUID_DE | CPUID_FP87,
2679 .features[FEAT_1_ECX] =
2680 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2681 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2682 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2683 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2684 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2685 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2686 .features[FEAT_8000_0001_EDX] =
2687 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2688 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2689 .features[FEAT_8000_0001_ECX] =
2690 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2691 .features[FEAT_7_0_EBX] =
2692 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2693 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2694 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2695 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2696 CPUID_7_0_EBX_AVX512ER,
2697 .features[FEAT_7_0_ECX] =
2698 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2699 .features[FEAT_7_0_EDX] =
2700 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2701 .features[FEAT_XSAVE] =
2702 CPUID_XSAVE_XSAVEOPT,
2703 .features[FEAT_6_EAX] =
2704 CPUID_6_EAX_ARAT,
2705 .xlevel = 0x80000008,
2706 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2707 },
2708 {
2709 .name = "Opteron_G1",
2710 .level = 5,
2711 .vendor = CPUID_VENDOR_AMD,
2712 .family = 15,
2713 .model = 6,
2714 .stepping = 1,
2715 .features[FEAT_1_EDX] =
2716 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2717 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2718 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2719 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2720 CPUID_DE | CPUID_FP87,
2721 .features[FEAT_1_ECX] =
2722 CPUID_EXT_SSE3,
2723 .features[FEAT_8000_0001_EDX] =
2724 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2725 .xlevel = 0x80000008,
2726 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2727 },
2728 {
2729 .name = "Opteron_G2",
2730 .level = 5,
2731 .vendor = CPUID_VENDOR_AMD,
2732 .family = 15,
2733 .model = 6,
2734 .stepping = 1,
2735 .features[FEAT_1_EDX] =
2736 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2737 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2738 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2739 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2740 CPUID_DE | CPUID_FP87,
2741 .features[FEAT_1_ECX] =
2742 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2743 .features[FEAT_8000_0001_EDX] =
2744 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2745 .features[FEAT_8000_0001_ECX] =
2746 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2747 .xlevel = 0x80000008,
2748 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2749 },
2750 {
2751 .name = "Opteron_G3",
2752 .level = 5,
2753 .vendor = CPUID_VENDOR_AMD,
2754 .family = 16,
2755 .model = 2,
2756 .stepping = 3,
2757 .features[FEAT_1_EDX] =
2758 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2759 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2760 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2761 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2762 CPUID_DE | CPUID_FP87,
2763 .features[FEAT_1_ECX] =
2764 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2765 CPUID_EXT_SSE3,
2766 .features[FEAT_8000_0001_EDX] =
2767 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
2768 CPUID_EXT2_RDTSCP,
2769 .features[FEAT_8000_0001_ECX] =
2770 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2771 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2772 .xlevel = 0x80000008,
2773 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2774 },
2775 {
2776 .name = "Opteron_G4",
2777 .level = 0xd,
2778 .vendor = CPUID_VENDOR_AMD,
2779 .family = 21,
2780 .model = 1,
2781 .stepping = 2,
2782 .features[FEAT_1_EDX] =
2783 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2784 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2785 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2786 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2787 CPUID_DE | CPUID_FP87,
2788 .features[FEAT_1_ECX] =
2789 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2790 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2791 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2792 CPUID_EXT_SSE3,
2793 .features[FEAT_8000_0001_EDX] =
2794 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2795 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2796 .features[FEAT_8000_0001_ECX] =
2797 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2798 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2799 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2800 CPUID_EXT3_LAHF_LM,
2801 .features[FEAT_SVM] =
2802 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2803 /* no xsaveopt! */
2804 .xlevel = 0x8000001A,
2805 .model_id = "AMD Opteron 62xx class CPU",
2806 },
2807 {
2808 .name = "Opteron_G5",
2809 .level = 0xd,
2810 .vendor = CPUID_VENDOR_AMD,
2811 .family = 21,
2812 .model = 2,
2813 .stepping = 0,
2814 .features[FEAT_1_EDX] =
2815 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2816 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2817 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2818 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2819 CPUID_DE | CPUID_FP87,
2820 .features[FEAT_1_ECX] =
2821 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2822 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2823 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2824 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2825 .features[FEAT_8000_0001_EDX] =
2826 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2827 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2828 .features[FEAT_8000_0001_ECX] =
2829 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2830 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2831 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2832 CPUID_EXT3_LAHF_LM,
2833 .features[FEAT_SVM] =
2834 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2835 /* no xsaveopt! */
2836 .xlevel = 0x8000001A,
2837 .model_id = "AMD Opteron 63xx class CPU",
2838 },
2839 {
2840 .name = "EPYC",
2841 .level = 0xd,
2842 .vendor = CPUID_VENDOR_AMD,
2843 .family = 23,
2844 .model = 1,
2845 .stepping = 2,
2846 .features[FEAT_1_EDX] =
2847 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2848 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2849 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2850 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2851 CPUID_VME | CPUID_FP87,
2852 .features[FEAT_1_ECX] =
2853 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2854 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2855 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2856 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2857 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2858 .features[FEAT_8000_0001_EDX] =
2859 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2860 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2861 CPUID_EXT2_SYSCALL,
2862 .features[FEAT_8000_0001_ECX] =
2863 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2864 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2865 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2866 CPUID_EXT3_TOPOEXT,
2867 .features[FEAT_7_0_EBX] =
2868 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2869 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2870 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2871 CPUID_7_0_EBX_SHA_NI,
2872 /* Missing: XSAVES (not supported by some Linux versions,
2873 * including v4.1 to v4.12).
2874 * KVM doesn't yet expose any XSAVES state save component.
2875 */
2876 .features[FEAT_XSAVE] =
2877 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2878 CPUID_XSAVE_XGETBV1,
2879 .features[FEAT_6_EAX] =
2880 CPUID_6_EAX_ARAT,
2881 .features[FEAT_SVM] =
2882 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2883 .xlevel = 0x8000001E,
2884 .model_id = "AMD EPYC Processor",
2885 .cache_info = &epyc_cache_info,
2886 },
2887 {
2888 .name = "EPYC-IBPB",
2889 .level = 0xd,
2890 .vendor = CPUID_VENDOR_AMD,
2891 .family = 23,
2892 .model = 1,
2893 .stepping = 2,
2894 .features[FEAT_1_EDX] =
2895 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2896 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2897 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2898 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2899 CPUID_VME | CPUID_FP87,
2900 .features[FEAT_1_ECX] =
2901 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2902 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2903 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2904 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2905 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2906 .features[FEAT_8000_0001_EDX] =
2907 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2908 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2909 CPUID_EXT2_SYSCALL,
2910 .features[FEAT_8000_0001_ECX] =
2911 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2912 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2913 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2914 CPUID_EXT3_TOPOEXT,
2915 .features[FEAT_8000_0008_EBX] =
2916 CPUID_8000_0008_EBX_IBPB,
2917 .features[FEAT_7_0_EBX] =
2918 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2919 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2920 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2921 CPUID_7_0_EBX_SHA_NI,
2922 /* Missing: XSAVES (not supported by some Linux versions,
2923 * including v4.1 to v4.12).
2924 * KVM doesn't yet expose any XSAVES state save component.
2925 */
2926 .features[FEAT_XSAVE] =
2927 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2928 CPUID_XSAVE_XGETBV1,
2929 .features[FEAT_6_EAX] =
2930 CPUID_6_EAX_ARAT,
2931 .features[FEAT_SVM] =
2932 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2933 .xlevel = 0x8000001E,
2934 .model_id = "AMD EPYC Processor (with IBPB)",
2935 .cache_info = &epyc_cache_info,
2936 },
2937 };
2938
/* Property name/value pair, used for the accelerator-specific CPU model
 * default tables below (see kvm_default_props, tcg_default_props).
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
2942
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * The table is terminated by a { NULL, NULL } entry; individual defaults
 * can be overridden at runtime via x86_cpu_change_kvm_default().
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
2959
/* TCG-specific defaults that override all CPU models when using TCG
 * (table terminated by a { NULL, NULL } entry).
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};
2966
2967
2968 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2969 {
2970 PropValue *pv;
2971 for (pv = kvm_default_props; pv->prop; pv++) {
2972 if (!strcmp(pv->prop, prop)) {
2973 pv->value = value;
2974 break;
2975 }
2976 }
2977
2978 /* It is valid to call this function only for properties that
2979 * are already present in the kvm_default_props table.
2980 */
2981 assert(pv->prop);
2982 }
2983
2984 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2985 bool migratable_only);
2986
/* Return true if KVM reports LMCE (local machine check exception) support
 * via KVM_X86_GET_MCE_CAP_SUPPORTED.  In non-KVM builds mce_cap stays 0,
 * so this always returns false.
 */
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}
2999
#define CPUID_MODEL_ID_SZ 48

/**
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string
 * automatically.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int i;

    /* Leaves 0x80000002..0x80000004 each supply 16 bytes of the brand
     * string, in EAX/EBX/ECX/EDX order.
     */
    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(str + i * 16, regs, sizeof(regs));
    }
    return 0;
}
3025
/* qdev properties specific to the "max" CPU model. */
static Property max_x86_cpu_properties[] = {
    /* Restrict features to migration-safe ones by default. */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    /* host-cache-info: pass host cache CPUID data through to the guest
     * (presumably; backed by cache_info_passthrough — see its users).
     */
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
3031
3032 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
3033 {
3034 DeviceClass *dc = DEVICE_CLASS(oc);
3035 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3036
3037 xcc->ordering = 9;
3038
3039 xcc->model_description =
3040 "Enables all features supported by the accelerator in the current host";
3041
3042 dc->props = max_x86_cpu_properties;
3043 }
3044
3045 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
3046
/* Instance initializer for the "max" CPU model: configure identification
 * (vendor/family/model/stepping/model-id) and minimum CPUID levels from
 * the host when the accelerator uses host CPUID (KVM/HVF), or from fixed
 * generic values under TCG.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        /* Leaf 0: host vendor string, assembled in EBX/EDX/ECX order. */
        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        /* NOTE(review): host_cpudef is written but never read afterwards in
         * this function — looks like a leftover; confirm before removing.
         */
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        host_vendor_fms(vendor, &family, &model, &stepping);

        cpu_x86_fill_model_id(model_id);

        /* Mirror the host identification into the QOM properties. */
        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        if (kvm_enabled()) {
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            /* accel_uses_host_cpuid() && !kvm_enabled() implies HVF here. */
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG: generic AMD-flavored identification. */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
3112
/* QOM type registration for the "max" CPU model. */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
3119
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
/* Class initializer for the "host" CPU model: same as "max" (its parent
 * type, below) but additionally requires host CPUID access.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->host_cpuid_required = true;
    /* Sort just before "max" (ordering 9) in -cpu help output. */
    xcc->ordering = 8;

#if defined(CONFIG_KVM)
    xcc->model_description =
        "KVM processor with all supported host features ";
#elif defined(CONFIG_HVF)
    xcc->model_description =
        "HVF processor with all supported host features ";
#endif
}

/* "host" is registered as a subtype of "max". */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};

#endif
3144
3145 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
3146 {
3147 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
3148
3149 switch (f->type) {
3150 case CPUID_FEATURE_WORD:
3151 {
3152 const char *reg = get_register_name_32(f->cpuid.reg);
3153 assert(reg);
3154 return g_strdup_printf("CPUID.%02XH:%s",
3155 f->cpuid.eax, reg);
3156 }
3157 case MSR_FEATURE_WORD:
3158 return g_strdup_printf("MSR(%02XH)",
3159 f->msr.index);
3160 }
3161
3162 return NULL;
3163 }
3164
3165 static void report_unavailable_features(FeatureWord w, uint32_t mask)
3166 {
3167 FeatureWordInfo *f = &feature_word_info[w];
3168 int i;
3169 char *feat_word_str;
3170
3171 for (i = 0; i < 32; ++i) {
3172 if ((1UL << i) & mask) {
3173 feat_word_str = feature_word_description(f, i);
3174 warn_report("%s doesn't support requested feature: %s%s%s [bit %d]",
3175 accel_uses_host_cpuid() ? "host" : "TCG",
3176 feat_word_str,
3177 f->feat_names[i] ? "." : "",
3178 f->feat_names[i] ? f->feat_names[i] : "", i);
3179 g_free(feat_word_str);
3180 }
3181 }
3182 }
3183
3184 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
3185 const char *name, void *opaque,
3186 Error **errp)
3187 {
3188 X86CPU *cpu = X86_CPU(obj);
3189 CPUX86State *env = &cpu->env;
3190 int64_t value;
3191
3192 value = (env->cpuid_version >> 8) & 0xf;
3193 if (value == 0xf) {
3194 value += (env->cpuid_version >> 20) & 0xff;
3195 }
3196 visit_type_int(v, name, &value, errp);
3197 }
3198
/* QOM setter for "family".
 *
 * Accepts 0..(0xff + 0xf): values up to 0xf go into the base family field
 * (bits 11..8); larger values saturate base family at 0xf and put the
 * remainder into the extended family field (bits 27..20) — the inverse of
 * x86_cpuid_version_get_family().
 */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear base family (bits 11..8) and extended family (bits 27..20). */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
3228
3229 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
3230 const char *name, void *opaque,
3231 Error **errp)
3232 {
3233 X86CPU *cpu = X86_CPU(obj);
3234 CPUX86State *env = &cpu->env;
3235 int64_t value;
3236
3237 value = (env->cpuid_version >> 4) & 0xf;
3238 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
3239 visit_type_int(v, name, &value, errp);
3240 }
3241
/* QOM setter for "model".
 *
 * Accepts 0..0xff: the low nibble goes to base model (bits 7..4), the high
 * nibble to extended model (bits 19..16) — the inverse of
 * x86_cpuid_version_get_model().
 */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear model (bits 7..4) and extended model (bits 19..16), set both. */
    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
3267
3268 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
3269 const char *name, void *opaque,
3270 Error **errp)
3271 {
3272 X86CPU *cpu = X86_CPU(obj);
3273 CPUX86State *env = &cpu->env;
3274 int64_t value;
3275
3276 value = env->cpuid_version & 0xf;
3277 visit_type_int(v, name, &value, errp);
3278 }
3279
/* QOM setter for "stepping" (bits 3..0 of env->cpuid_version).
 * Accepts 0..0xf.
 */
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
3305
3306 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
3307 {
3308 X86CPU *cpu = X86_CPU(obj);
3309 CPUX86State *env = &cpu->env;
3310 char *value;
3311
3312 value = g_malloc(CPUID_VENDOR_SZ + 1);
3313 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
3314 env->cpuid_vendor3);
3315 return value;
3316 }
3317
3318 static void x86_cpuid_set_vendor(Object *obj, const char *value,
3319 Error **errp)
3320 {
3321 X86CPU *cpu = X86_CPU(obj);
3322 CPUX86State *env = &cpu->env;
3323 int i;
3324
3325 if (strlen(value) != CPUID_VENDOR_SZ) {
3326 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
3327 return;
3328 }
3329
3330 env->cpuid_vendor1 = 0;
3331 env->cpuid_vendor2 = 0;
3332 env->cpuid_vendor3 = 0;
3333 for (i = 0; i < 4; i++) {
3334 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3335 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3336 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3337 }
3338 }
3339
3340 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3341 {
3342 X86CPU *cpu = X86_CPU(obj);
3343 CPUX86State *env = &cpu->env;
3344 char *value;
3345 int i;
3346
3347 value = g_malloc(48 + 1);
3348 for (i = 0; i < 48; i++) {
3349 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3350 }
3351 value[48] = '\0';
3352 return value;
3353 }
3354
3355 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3356 Error **errp)
3357 {
3358 X86CPU *cpu = X86_CPU(obj);
3359 CPUX86State *env = &cpu->env;
3360 int c, len, i;
3361
3362 if (model_id == NULL) {
3363 model_id = "";
3364 }
3365 len = strlen(model_id);
3366 memset(env->cpuid_model, 0, 48);
3367 for (i = 0; i < 48; i++) {
3368 if (i >= len) {
3369 c = '\0';
3370 } else {
3371 c = (uint8_t)model_id[i];
3372 }
3373 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3374 }
3375 }
3376
3377 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3378 void *opaque, Error **errp)
3379 {
3380 X86CPU *cpu = X86_CPU(obj);
3381 int64_t value;
3382
3383 value = cpu->env.tsc_khz * 1000;
3384 visit_type_int(v, name, &value, errp);
3385 }
3386
/* QOM setter for "tsc-frequency": value is in Hz, stored internally in kHz
 * (both tsc_khz and user_tsc_khz are updated; sub-kHz precision is dropped).
 */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    /* NOTE: value > INT64_MAX can never be true for an int64_t; only the
     * lower bound check is effective here.
     */
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
3409
/* Generic getter for "feature-words" and "filtered-features" properties.
 *
 * @opaque points at the uint32_t feature-word array to report.  Builds a
 * QAPI list of X86CPUFeatureWordInfo, one entry per CPUID-type feature
 * word.  The list nodes and payloads are stack-allocated: the visitor
 * consumes them synchronously before this function returns.
 */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /*
         * We didn't have MSR features when "feature-words" was
         * introduced. Therefore skipped other type entries.
         */
        if (wi->type != CPUID_FEATURE_WORD) {
            continue;
        }
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid.eax;
        qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid.ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
3445
3446 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3447 void *opaque, Error **errp)
3448 {
3449 X86CPU *cpu = X86_CPU(obj);
3450 int64_t value = cpu->hyperv_spinlock_attempts;
3451
3452 visit_type_int(v, name, &value, errp);
3453 }
3454
/* QOM setter for "hv-spinlocks".
 *
 * Accepts 0xFFF..UINT_MAX — 0xFFF is presumably the minimum retry count
 * permitted by the Hyper-V spec; confirm against the Hyper-V TLFS.
 */
static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}
3479
/* PropertyInfo wiring the hv-spinlocks getter/setter pair into qdev. */
static const PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
3485
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = strchr(s, '_'); p != NULL; p = strchr(p + 1, '_')) {
        *p = '-';
    }
}
3495
/* Return the feature property name for a feature flag bit.
 *
 * May return NULL if the bit has no name assigned in feature_word_info.
 */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        /* Map the save-state component back to the feature flag that
         * enables it, when one is defined for this component.
         */
        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}
3516
/* Compatibility hack to maintain legacy +-feat semantics, where +-feat
 * overwrites any feature set by feat=on|feat even if the latter is parsed
 * after +-feat (i.e. "-x2apic,x2apic=on" will result in x2apic disabled).
 */
static GList *plus_features, *minus_features;
3523
3524 static gint compare_string(gconstpointer a, gconstpointer b)
3525 {
3526 return g_strcmp0(a, b);
3527 }
3528
/* Parse "+feature,-feature,feature=foo" CPU feature string.
 *
 * Registers each parsed feature as a global property on @typename.
 * Only the first call has any effect (guarded by cpu_globals_initialized),
 * so only the first -cpu option's feature string is applied.
 * @features is modified in place by strtok()/feat2prop().
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    /* strtok() keeps static state: this loop is not reentrant. */
    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: "+feat"/"-feat" are collected and applied
         * later, overriding any "feat=on|off" in the same string.
         */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "name=value"; a bare "name" means "name=on". */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" takes a size-style suffix (e.g. 2G) and
         * maps to the "tsc-frequency" property in Hz.
         */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        /* NOTE(review): prop->driver stores the caller's pointer directly;
         * this assumes @typename outlives the global property — confirm.
         */
        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}
3618
3619 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3620 static int x86_cpu_filter_features(X86CPU *cpu);
3621
3622 /* Check for missing features that may prevent the CPU class from
3623 * running using the current machine and accelerator.
3624 */
3625 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3626 strList **missing_feats)
3627 {
3628 X86CPU *xc;
3629 FeatureWord w;
3630 Error *err = NULL;
3631 strList **next = missing_feats;
3632
3633 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3634 strList *new = g_new0(strList, 1);
3635 new->value = g_strdup("kvm");
3636 *missing_feats = new;
3637 return;
3638 }
3639
3640 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3641
3642 x86_cpu_expand_features(xc, &err);
3643 if (err) {
3644 /* Errors at x86_cpu_expand_features should never happen,
3645 * but in case it does, just report the model as not
3646 * runnable at all using the "type" property.
3647 */
3648 strList *new = g_new0(strList, 1);
3649 new->value = g_strdup("type");
3650 *next = new;
3651 next = &new->next;
3652 }
3653
3654 x86_cpu_filter_features(xc);
3655
3656 for (w = 0; w < FEATURE_WORDS; w++) {
3657 uint32_t filtered = xc->filtered_features[w];
3658 int i;
3659 for (i = 0; i < 32; i++) {
3660 if (filtered & (1UL << i)) {
3661 strList *new = g_new0(strList, 1);
3662 new->value = g_strdup(x86_cpu_feature_name(w, i));
3663 *next = new;
3664 next = &new->next;
3665 }
3666 }
3667 }
3668
3669 object_unref(OBJECT(xc));
3670 }
3671
3672 /* Print all cpuid feature names in featureset
3673 */
3674 static void listflags(FILE *f, fprintf_function print, GList *features)
3675 {
3676 size_t len = 0;
3677 GList *tmp;
3678
3679 for (tmp = features; tmp; tmp = tmp->next) {
3680 const char *name = tmp->data;
3681 if ((len + strlen(name) + 1) >= 75) {
3682 print(f, "\n");
3683 len = 0;
3684 }
3685 print(f, "%s%s", len == 0 ? " " : " ", name);
3686 len += strlen(name) + 1;
3687 }
3688 print(f, "\n");
3689 }
3690
3691 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3692 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3693 {
3694 ObjectClass *class_a = (ObjectClass *)a;
3695 ObjectClass *class_b = (ObjectClass *)b;
3696 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3697 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3698 char *name_a, *name_b;
3699 int ret;
3700
3701 if (cc_a->ordering != cc_b->ordering) {
3702 ret = cc_a->ordering - cc_b->ordering;
3703 } else {
3704 name_a = x86_cpu_class_get_model_name(cc_a);
3705 name_b = x86_cpu_class_get_model_name(cc_b);
3706 ret = strcmp(name_a, name_b);
3707 g_free(name_a);
3708 g_free(name_b);
3709 }
3710 return ret;
3711 }
3712
3713 static GSList *get_sorted_cpu_model_list(void)
3714 {
3715 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3716 list = g_slist_sort(list, x86_cpu_list_compare);
3717 return list;
3718 }
3719
3720 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3721 {
3722 ObjectClass *oc = data;
3723 X86CPUClass *cc = X86_CPU_CLASS(oc);
3724 CPUListState *s = user_data;
3725 char *name = x86_cpu_class_get_model_name(cc);
3726 const char *desc = cc->model_description;
3727 if (!desc && cc->cpu_def) {
3728 desc = cc->cpu_def->model_id;
3729 }
3730
3731 (*s->cpu_fprintf)(s->file, "x86 %-20s %-48s\n",
3732 name, desc);
3733 g_free(name);
3734 }
3735
3736 /* list available CPU models and flags */
3737 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3738 {
3739 int i, j;
3740 CPUListState s = {
3741 .file = f,
3742 .cpu_fprintf = cpu_fprintf,
3743 };
3744 GSList *list;
3745 GList *names = NULL;
3746
3747 (*cpu_fprintf)(f, "Available CPUs:\n");
3748 list = get_sorted_cpu_model_list();
3749 g_slist_foreach(list, x86_cpu_list_entry, &s);
3750 g_slist_free(list);
3751
3752 names = NULL;
3753 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3754 FeatureWordInfo *fw = &feature_word_info[i];
3755 for (j = 0; j < 32; j++) {
3756 if (fw->feat_names[j]) {
3757 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3758 }
3759 }
3760 }
3761
3762 names = g_list_sort(names, (GCompareFunc)strcmp);
3763
3764 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
3765 listflags(f, cpu_fprintf, names);
3766 (*cpu_fprintf)(f, "\n");
3767 g_list_free(names);
3768 }
3769
3770 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3771 {
3772 ObjectClass *oc = data;
3773 X86CPUClass *cc = X86_CPU_CLASS(oc);
3774 CpuDefinitionInfoList **cpu_list = user_data;
3775 CpuDefinitionInfoList *entry;
3776 CpuDefinitionInfo *info;
3777
3778 info = g_malloc0(sizeof(*info));
3779 info->name = x86_cpu_class_get_model_name(cc);
3780 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3781 info->has_unavailable_features = true;
3782 info->q_typename = g_strdup(object_class_get_name(oc));
3783 info->migration_safe = cc->migration_safe;
3784 info->has_migration_safe = true;
3785 info->q_static = cc->static_model;
3786
3787 entry = g_malloc0(sizeof(*entry));
3788 entry->value = info;
3789 entry->next = *cpu_list;
3790 *cpu_list = entry;
3791 }
3792
3793 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
3794 {
3795 CpuDefinitionInfoList *cpu_list = NULL;
3796 GSList *list = get_sorted_cpu_model_list();
3797 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3798 g_slist_free(list);
3799 return cpu_list;
3800 }
3801
3802 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3803 bool migratable_only)
3804 {
3805 FeatureWordInfo *wi = &feature_word_info[w];
3806 uint32_t r = 0;
3807
3808 if (kvm_enabled()) {
3809 switch (wi->type) {
3810 case CPUID_FEATURE_WORD:
3811 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
3812 wi->cpuid.ecx,
3813 wi->cpuid.reg);
3814 break;
3815 case MSR_FEATURE_WORD:
3816 r = kvm_arch_get_supported_msr_feature(kvm_state,
3817 wi->msr.index);
3818 break;
3819 }
3820 } else if (hvf_enabled()) {
3821 if (wi->type != CPUID_FEATURE_WORD) {
3822 return 0;
3823 }
3824 r = hvf_get_supported_cpuid(wi->cpuid.eax,
3825 wi->cpuid.ecx,
3826 wi->cpuid.reg);
3827 } else if (tcg_enabled()) {
3828 r = wi->tcg_features;
3829 } else {
3830 return ~0;
3831 }
3832 if (migratable_only) {
3833 r &= x86_cpu_get_migratable_flags(w);
3834 }
3835 return r;
3836 }
3837
3838 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3839 {
3840 FeatureWord w;
3841
3842 for (w = 0; w < FEATURE_WORDS; w++) {
3843 report_unavailable_features(w, cpu->filtered_features[w]);
3844 }
3845 }
3846
3847 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3848 {
3849 PropValue *pv;
3850 for (pv = props; pv->prop; pv++) {
3851 if (!pv->value) {
3852 continue;
3853 }
3854 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3855 &error_abort);
3856 }
3857 }
3858
/* Load data from X86CPUDefinition into a X86CPU object.
 *
 * Copies level/xlevel minimums, family/model/stepping, model-id, the
 * per-word feature masks and the vendor string from @def into @cpu
 * through QOM properties, then applies accelerator-specific defaults.
 * Errors from the individual property setters accumulate in @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    /* Feature words are copied directly, bypassing QOM. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            /* userspace APIC cannot provide x2apic */
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Every QEMU CPU model advertises the hypervisor flag. */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
3920
3921 /* Return a QDict containing keys for all properties that can be included
3922 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3923 * must be included in the dictionary.
3924 */
3925 static QDict *x86_cpu_static_props(void)
3926 {
3927 FeatureWord w;
3928 int i;
3929 static const char *props[] = {
3930 "min-level",
3931 "min-xlevel",
3932 "family",
3933 "model",
3934 "stepping",
3935 "model-id",
3936 "vendor",
3937 "lmce",
3938 NULL,
3939 };
3940 static QDict *d;
3941
3942 if (d) {
3943 return d;
3944 }
3945
3946 d = qdict_new();
3947 for (i = 0; props[i]; i++) {
3948 qdict_put_null(d, props[i]);
3949 }
3950
3951 for (w = 0; w < FEATURE_WORDS; w++) {
3952 FeatureWordInfo *fi = &feature_word_info[w];
3953 int bit;
3954 for (bit = 0; bit < 32; bit++) {
3955 if (!fi->feat_names[bit]) {
3956 continue;
3957 }
3958 qdict_put_null(d, fi->feat_names[bit]);
3959 }
3960 }
3961
3962 return d;
3963 }
3964
3965 /* Add an entry to @props dict, with the value for property. */
3966 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3967 {
3968 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3969 &error_abort);
3970
3971 qdict_put_obj(props, prop, value);
3972 }
3973
3974 /* Convert CPU model data from X86CPU object to a property dictionary
3975 * that can recreate exactly the same CPU model.
3976 */
3977 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3978 {
3979 QDict *sprops = x86_cpu_static_props();
3980 const QDictEntry *e;
3981
3982 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3983 const char *prop = qdict_entry_key(e);
3984 x86_cpu_expand_prop(cpu, props, prop);
3985 }
3986 }
3987
3988 /* Convert CPU model data from X86CPU object to a property dictionary
3989 * that can recreate exactly the same CPU model, including every
3990 * writeable QOM property.
3991 */
3992 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3993 {
3994 ObjectPropertyIterator iter;
3995 ObjectProperty *prop;
3996
3997 object_property_iter_init(&iter, OBJECT(cpu));
3998 while ((prop = object_property_iter_next(&iter))) {
3999 /* skip read-only or write-only properties */
4000 if (!prop->get || !prop->set) {
4001 continue;
4002 }
4003
4004 /* "hotplugged" is the only property that is configurable
4005 * on the command-line but will be set differently on CPUs
4006 * created using "-cpu ... -smp ..." and by CPUs created
4007 * on the fly by x86_cpu_from_model() for querying. Skip it.
4008 */
4009 if (!strcmp(prop->name, "hotplugged")) {
4010 continue;
4011 }
4012 x86_cpu_expand_prop(cpu, props, prop->name);
4013 }
4014 }
4015
4016 static void object_apply_props(Object *obj, QDict *props, Error **errp)
4017 {
4018 const QDictEntry *prop;
4019 Error *err = NULL;
4020
4021 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
4022 object_property_set_qobject(obj, qdict_entry_value(prop),
4023 qdict_entry_key(prop), &err);
4024 if (err) {
4025 break;
4026 }
4027 }
4028
4029 error_propagate(errp, err);
4030 }
4031
4032 /* Create X86CPU object according to model+props specification */
4033 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
4034 {
4035 X86CPU *xc = NULL;
4036 X86CPUClass *xcc;
4037 Error *err = NULL;
4038
4039 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
4040 if (xcc == NULL) {
4041 error_setg(&err, "CPU model '%s' not found", model);
4042 goto out;
4043 }
4044
4045 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
4046 if (props) {
4047 object_apply_props(OBJECT(xc), props, &err);
4048 if (err) {
4049 goto out;
4050 }
4051 }
4052
4053 x86_cpu_expand_features(xc, &err);
4054 if (err) {
4055 goto out;
4056 }
4057
4058 out:
4059 if (err) {
4060 error_propagate(errp, err);
4061 object_unref(OBJECT(xc));
4062 xc = NULL;
4063 }
4064 return xc;
4065 }
4066
4067 CpuModelExpansionInfo *
4068 arch_query_cpu_model_expansion(CpuModelExpansionType type,
4069 CpuModelInfo *model,
4070 Error **errp)
4071 {
4072 X86CPU *xc = NULL;
4073 Error *err = NULL;
4074 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
4075 QDict *props = NULL;
4076 const char *base_name;
4077
4078 xc = x86_cpu_from_model(model->name,
4079 model->has_props ?
4080 qobject_to(QDict, model->props) :
4081 NULL, &err);
4082 if (err) {
4083 goto out;
4084 }
4085
4086 props = qdict_new();
4087 ret->model = g_new0(CpuModelInfo, 1);
4088 ret->model->props = QOBJECT(props);
4089 ret->model->has_props = true;
4090
4091 switch (type) {
4092 case CPU_MODEL_EXPANSION_TYPE_STATIC:
4093 /* Static expansion will be based on "base" only */
4094 base_name = "base";
4095 x86_cpu_to_dict(xc, props);
4096 break;
4097 case CPU_MODEL_EXPANSION_TYPE_FULL:
4098 /* As we don't return every single property, full expansion needs
4099 * to keep the original model name+props, and add extra
4100 * properties on top of that.
4101 */
4102 base_name = model->name;
4103 x86_cpu_to_dict_full(xc, props);
4104 break;
4105 default:
4106 error_setg(&err, "Unsupported expansion type");
4107 goto out;
4108 }
4109
4110 x86_cpu_to_dict(xc, props);
4111
4112 ret->model->name = g_strdup(base_name);
4113
4114 out:
4115 object_unref(OBJECT(xc));
4116 if (err) {
4117 error_propagate(errp, err);
4118 qapi_free_CpuModelExpansionInfo(ret);
4119 ret = NULL;
4120 }
4121 return ret;
4122 }
4123
4124 static gchar *x86_gdb_arch_name(CPUState *cs)
4125 {
4126 #ifdef TARGET_X86_64
4127 return g_strdup("i386:x86-64");
4128 #else
4129 return g_strdup("i386");
4130 #endif
4131 }
4132
4133 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
4134 {
4135 X86CPUDefinition *cpudef = data;
4136 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4137
4138 xcc->cpu_def = cpudef;
4139 xcc->migration_safe = true;
4140 }
4141
4142 static void x86_register_cpudef_type(X86CPUDefinition *def)
4143 {
4144 char *typename = x86_cpu_type_name(def->name);
4145 TypeInfo ti = {
4146 .name = typename,
4147 .parent = TYPE_X86_CPU,
4148 .class_init = x86_cpu_cpudef_class_init,
4149 .class_data = def,
4150 };
4151
4152 /* AMD aliases are handled at runtime based on CPUID vendor, so
4153 * they shouldn't be set on the CPU model table.
4154 */
4155 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
4156 /* catch mistakes instead of silently truncating model_id when too long */
4157 assert(def->model_id && strlen(def->model_id) <= 48);
4158
4159
4160 type_register(&ti);
4161 g_free(typename);
4162 }
4163
4164 #if !defined(CONFIG_USER_ONLY)
4165
/* Remove the APIC bit from the guest-visible CPUID[1].EDX feature word. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
4170
4171 #endif /* !CONFIG_USER_ONLY */
4172
/* Compute the guest-visible CPUID leaf @index / subleaf @count for
 * @env, storing the four result registers in *eax..*edx.
 *
 * Leaves beyond the limit of their range (basic, 0x4000xxxx,
 * 0x8000xxxx, 0xC000xxxx) are redirected to leaf cpuid_level, per the
 * Intel SDM.  Some leaves can be passed through from the host
 * (cache_info_passthrough) or the accelerator (PMU leaf 0xA).
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string and maximum basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, brand/CLFLUSH/APIC info, feature flags */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE mirrors CR4.OSXSAVE at runtime; not a static bit */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) <<  8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26.  */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << pkg_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* MONITOR/MWAIT Leaf */
        *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
        *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
        *ecx = cpu->mwait.ecx; /* flags */
        *edx = cpu->mwait.edx; /* mwait substates */
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors CR4.PKE at runtime, like OSXSAVE in leaf 1 */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = xsave_area_size(env->xcr0);
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset sub-leaves */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Maximum extended leaf; vendor string repeated */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        /* Advanced power management (e.g. invtsc) */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if  (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        /* AMD cache topology (analogous to Intel leaf 4) */
        *eax = 0;
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(cs, cpu,
                                  eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        /* Maximum Centaur/VIA extended leaf */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        /* AMD Secure Encrypted Virtualization (SEV) */
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
4612
/* CPUClass::reset()
 *
 * Bring the vCPU to its architectural reset state: real mode, the
 * documented segment/descriptor/FPU/MSR reset values, CS:IP pointing
 * at the reset vector, and (system emulation only) BSP designation
 * plus accelerator-side vCPU reset.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Only the fields up to end_reset_fields are cleared; the rest of
     * CPUX86State survives reset.
     */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segments; CS base 0xffff0000 aliases the reset vector */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode.  */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_injected = -1;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}
4741
4742 #ifndef CONFIG_USER_ONLY
4743 bool cpu_is_bsp(X86CPU *cpu)
4744 {
4745 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4746 }
4747
/* TODO: remove me, when reset over QOM tree is implemented */
/* Machine-reset callback: simply resets the CPU registered as @opaque. */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
4754 #endif
4755
4756 static void mce_init(X86CPU *cpu)
4757 {
4758 CPUX86State *cenv = &cpu->env;
4759 unsigned int bank;
4760
4761 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4762 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4763 (CPUID_MCE | CPUID_MCA)) {
4764 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4765 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4766 cenv->mcg_ctl = ~(uint64_t)0;
4767 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4768 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4769 }
4770 }
4771 }
4772
4773 #ifndef CONFIG_USER_ONLY
4774 APICCommonClass *apic_get_class(void)
4775 {
4776 const char *apic_type = "apic";
4777
4778 /* TODO: in-kernel irqchip for hvf */
4779 if (kvm_apic_in_kernel()) {
4780 apic_type = "kvm-apic";
4781 } else if (xen_enabled()) {
4782 apic_type = "xen-apic";
4783 }
4784
4785 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4786 }
4787
/* Instantiate the local APIC device for @cpu and wire it up.
 * The CPU's "lapic" child property keeps the only long-lived
 * reference, so the extra reference from object_new() is dropped
 * right after object_property_add_child().
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
4805
/*
 * Realize the CPU's local APIC, if one was created, and map the APIC
 * MMIO area into system memory.  The mapping is performed only once,
 * by the first CPU realized (the region is shared).
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;  /* true once any CPU mapped the MMIO area */

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
4828
/*
 * machine-init-done notifier: if the machine exposes "/machine/smram",
 * alias its first 4GiB into this CPU's SMM address space root with a
 * higher priority than normal memory, so SMRAM shadows RAM in SMM.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        /* priority 1: overlaps (and wins over) cpu_as_mem at priority 0 */
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
4843 #else
/* User-mode emulation has no APIC device model; nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
4847 #endif
4848
4849 /* Note: Only safe for use on x86(-64) hosts */
4850 static uint32_t x86_host_phys_bits(void)
4851 {
4852 uint32_t eax;
4853 uint32_t host_phys_bits;
4854
4855 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4856 if (eax >= 0x80000008) {
4857 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4858 /* Note: According to AMD doc 25481 rev 2.34 they have a field
4859 * at 23:16 that can specify a maximum physical address bits for
4860 * the guest that can override this value; but I've not seen
4861 * anything with that set.
4862 */
4863 host_phys_bits = eax & 0xff;
4864 } else {
4865 /* It's an odd 64 bit machine that doesn't have the leaf for
4866 * physical address bits; fall back to 36 that's most older
4867 * Intel.
4868 */
4869 host_phys_bits = 36;
4870 }
4871
4872 return host_phys_bits;
4873 }
4874
4875 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4876 {
4877 if (*min < value) {
4878 *min = value;
4879 }
4880 }
4881
4882 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4883 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4884 {
4885 CPUX86State *env = &cpu->env;
4886 FeatureWordInfo *fi = &feature_word_info[w];
4887 uint32_t eax = fi->cpuid.eax;
4888 uint32_t region = eax & 0xF0000000;
4889
4890 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
4891 if (!env->features[w]) {
4892 return;
4893 }
4894
4895 switch (region) {
4896 case 0x00000000:
4897 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4898 break;
4899 case 0x80000000:
4900 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4901 break;
4902 case 0xC0000000:
4903 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4904 break;
4905 }
4906 }
4907
4908 /* Calculate XSAVE components based on the configured CPU feature flags */
4909 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4910 {
4911 CPUX86State *env = &cpu->env;
4912 int i;
4913 uint64_t mask;
4914
4915 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4916 return;
4917 }
4918
4919 mask = 0;
4920 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4921 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4922 if (env->features[esa->feature] & esa->bits) {
4923 mask |= (1ULL << i);
4924 }
4925 }
4926
4927 env->features[FEAT_XSAVE_COMP_LO] = mask;
4928 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4929 }
4930
/***** Steps involved in loading and filtering CPUID data
4932 *
4933 * When initializing and realizing a CPU object, the steps
4934 * involved in setting up CPUID data are:
4935 *
4936 * 1) Loading CPU model definition (X86CPUDefinition). This is
4937 * implemented by x86_cpu_load_def() and should be completely
4938 * transparent, as it is done automatically by instance_init.
4939 * No code should need to look at X86CPUDefinition structs
4940 * outside instance_init.
4941 *
4942 * 2) CPU expansion. This is done by realize before CPUID
4943 * filtering, and will make sure host/accelerator data is
4944 * loaded for CPU models that depend on host capabilities
4945 * (e.g. "host"). Done by x86_cpu_expand_features().
4946 *
4947 * 3) CPUID filtering. This initializes extra data related to
4948 * CPUID, and checks if the host supports all capabilities
4949 * required by the CPU. Runnability of a CPU model is
4950 * determined at this step. Done by x86_cpu_filter_features().
4951 *
4952 * Some operations don't require all steps to be performed.
4953 * More precisely:
4954 *
4955 * - CPU instance creation (instance_init) will run only CPU
4956 * model loading. CPU expansion can't run at instance_init-time
4957 * because host/accelerator data may be not available yet.
4958 * - CPU realization will perform both CPU model expansion and CPUID
4959 * filtering, and return an error in case one of them fails.
4960 * - query-cpu-definitions needs to run all 3 steps. It needs
4961 * to run CPUID filtering, as the 'unavailable-features'
4962 * field is set based on the filtering results.
4963 * - The query-cpu-model-expansion QMP command only needs to run
4964 * CPU model loading and CPU expansion. It should not filter
4965 * any CPUID data based on host capabilities.
4966 */
4967
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 *
 * On failure, the error is propagated through @errp and the CPU state
 * may be partially expanded.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w] & \
                ~feature_word_info[w].no_autoenable_flags;
        }
    }

    /* Apply legacy "+feature" command-line flags as property writes */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* Apply legacy "-feature" command-line flags as property writes */
    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features are meaningless (and hidden) without KVM */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }

        /* SEV requires CPUID[0x8000001F] */
        if (sev_enabled()) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
5058
/*
 * Finishes initialization of CPUID data, filters CPU feature
 * words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        /* filtered_features records requested-but-unavailable bits */
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    /* intel-pt needs a stronger check: the KVM-reported leaf 0x14
     * capabilities must cover everything cpu_x86_cpuid() will claim.
     */
    if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
        kvm_enabled()) {
        KVMState *s = CPU(cpu)->kvm_state;
        uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
        uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
        uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
        uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
        uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);

        if (!eax_0 ||
            ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
            ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
            ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
            ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
                                           INTEL_PT_ADDR_RANGES_NUM) ||
            ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
                 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
            (ecx_0 & INTEL_PT_IP_LIP)) {
            /*
             * Processor Trace capabilities aren't configurable, so if the
             * host can't emulate the capabilities we report on
             * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
             */
            env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
            cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
            rv = 1;
        }
    }

    return rv;
}
5113
/* Vendor checks: all three CPUID vendor-string registers must match. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/*
 * Realize an X86CPU: expand and filter CPUID features, fix up vendor
 * quirks and phys-bits, set up cache info, create/realize the APIC,
 * initialize the vCPU and finally reset it.  The order of these steps
 * matters (e.g. feature expansion must precede filtering, and the
 * hyperthreading warning must follow qemu_init_vcpu()).
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (xcc->host_cpuid_required) {
        if (!accel_uses_host_cpuid()) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(&local_err, "CPU model '%s' requires KVM", name);
            g_free(name);
            goto out;
        }

        if (enable_cpu_pm) {
            host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
                       &cpu->mwait.ecx, &cpu->mwait.edx);
            env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
        }
    }

    /* mwait extended info: needed for Core compatibility */
    /* We always wake on interrupt even if host does not have the capability */
    cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       accel_uses_host_cpuid() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
                if (cpu->host_phys_bits_limit &&
                    cpu->phys_bits > cpu->host_phys_bits_limit) {
                    cpu->phys_bits = cpu->host_phys_bits_limit;
                }
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            g_free(name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->cpu_def->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }


    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed if the guest sees one or if there are several CPUs */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        /* TCG needs a separate SMM address space containing SMRAM */
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the checking is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
            warn_report("This family of AMD CPU doesn't support "
                        "hyperthreading(%d)",
                        cs->nr_threads);
            error_printf("Please configure -smp options properly"
                         " or try enabling topoext feature.\n");
            ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
5360
/*
 * Unrealize an X86CPU: stop the vCPU thread, unregister the machine
 * reset callback, drop the APIC child device and chain to the parent
 * class' unrealize.
 */
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
5383
/* State for a single feature-bit QOM property: which feature word it
 * controls and which bit(s) within that word it covers.
 */
typedef struct BitProperty {
    FeatureWord w;   /* feature word index */
    uint32_t mask;   /* bit mask within the word; may cover multiple bits */
} BitProperty;
5388
5389 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5390 void *opaque, Error **errp)
5391 {
5392 X86CPU *cpu = X86_CPU(obj);
5393 BitProperty *fp = opaque;
5394 uint32_t f = cpu->env.features[fp->w];
5395 bool value = (f & fp->mask) == fp->mask;
5396 visit_type_bool(v, name, &value, errp);
5397 }
5398
5399 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5400 void *opaque, Error **errp)
5401 {
5402 DeviceState *dev = DEVICE(obj);
5403 X86CPU *cpu = X86_CPU(obj);
5404 BitProperty *fp = opaque;
5405 Error *local_err = NULL;
5406 bool value;
5407
5408 if (dev->realized) {
5409 qdev_prop_set_after_realize(dev, name, errp);
5410 return;
5411 }
5412
5413 visit_type_bool(v, name, &value, &local_err);
5414 if (local_err) {
5415 error_propagate(errp, local_err);
5416 return;
5417 }
5418
5419 if (value) {
5420 cpu->env.features[fp->w] |= fp->mask;
5421 } else {
5422 cpu->env.features[fp->w] &= ~fp->mask;
5423 }
5424 cpu->env.user_features[fp->w] |= fp->mask;
5425 }
5426
5427 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5428 void *opaque)
5429 {
5430 BitProperty *prop = opaque;
5431 g_free(prop);
5432 }
5433
5434 /* Register a boolean property to get/set a single bit in a uint32_t field.
5435 *
5436 * The same property name can be registered multiple times to make it affect
5437 * multiple bits in the same FeatureWord. In that case, the getter will return
5438 * true only if all bits are set.
5439 */
5440 static void x86_cpu_register_bit_prop(X86CPU *cpu,
5441 const char *prop_name,
5442 FeatureWord w,
5443 int bitnr)
5444 {
5445 BitProperty *fp;
5446 ObjectProperty *op;
5447 uint32_t mask = (1UL << bitnr);
5448
5449 op = object_property_find(OBJECT(cpu), prop_name, NULL);
5450 if (op) {
5451 fp = op->opaque;
5452 assert(fp->w == w);
5453 fp->mask |= mask;
5454 } else {
5455 fp = g_new0(BitProperty, 1);
5456 fp->w = w;
5457 fp->mask = mask;
5458 object_property_add(OBJECT(cpu), prop_name, "bool",
5459 x86_cpu_get_bit_prop,
5460 x86_cpu_set_bit_prop,
5461 x86_cpu_release_bit_prop, fp, &error_abort);
5462 }
5463 }
5464
5465 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5466 FeatureWord w,
5467 int bitnr)
5468 {
5469 FeatureWordInfo *fi = &feature_word_info[w];
5470 const char *name = fi->feat_names[bitnr];
5471
5472 if (!name) {
5473 return;
5474 }
5475
5476 /* Property names should use "-" instead of "_".
5477 * Old names containing underscores are registered as aliases
5478 * using object_property_add_alias()
5479 */
5480 assert(!strchr(name, '_'));
5481 /* aliases don't use "|" delimiters anymore, they are registered
5482 * manually using object_property_add_alias() */
5483 assert(!strchr(name, '|'));
5484 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5485 }
5486
5487 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5488 {
5489 X86CPU *cpu = X86_CPU(cs);
5490 CPUX86State *env = &cpu->env;
5491 GuestPanicInformation *panic_info = NULL;
5492
5493 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5494 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5495
5496 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
5497
5498 assert(HV_CRASH_PARAMS >= 5);
5499 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5500 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5501 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5502 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5503 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5504 }
5505
5506 return panic_info;
5507 }
5508 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5509 const char *name, void *opaque,
5510 Error **errp)
5511 {
5512 CPUState *cs = CPU(obj);
5513 GuestPanicInformation *panic_info;
5514
5515 if (!cs->crash_occurred) {
5516 error_setg(errp, "No crash occured");
5517 return;
5518 }
5519
5520 panic_info = x86_cpu_get_crash_info(cs);
5521 if (panic_info == NULL) {
5522 error_setg(errp, "No crash information");
5523 return;
5524 }
5525
5526 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5527 errp);
5528 qapi_free_GuestPanicInformation(panic_info);
5529 }
5530
/*
 * Instance init for X86CPU: register the CPUID-related QOM properties,
 * one boolean property per named feature bit, the legacy-name aliases,
 * and finally load the class' CPU model definition.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Aliases for alternate spellings of feature names */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Legacy underscore spellings kept for compatibility */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
5613
5614 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5615 {
5616 X86CPU *cpu = X86_CPU(cs);
5617
5618 return cpu->apic_id;
5619 }
5620
5621 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5622 {
5623 X86CPU *cpu = X86_CPU(cs);
5624
5625 return cpu->env.cr[0] & CR0_PG_MASK;
5626 }
5627
5628 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5629 {
5630 X86CPU *cpu = X86_CPU(cs);
5631
5632 cpu->env.eip = value;
5633 }
5634
5635 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5636 {
5637 X86CPU *cpu = X86_CPU(cs);
5638
5639 cpu->env.eip = tb->pc - tb->cs_base;
5640 }
5641
/*
 * Return the highest-priority pending interrupt type from
 * @interrupt_request that the CPU can currently take, or 0 if none.
 *
 * Checks are ordered by priority: POLL, SIPI, then (when the global
 * interrupt flag GIF is set) SMI, NMI, MCE, hard interrupts and
 * virtual interrupts, each gated by its own masking condition.
 */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    if (env->hflags2 & HF2_GIF_MASK) {
        /* SMI is blocked while already in SMM */
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}
5683
5684 static bool x86_cpu_has_work(CPUState *cs)
5685 {
5686 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
5687 }
5688
5689 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5690 {
5691 X86CPU *cpu = X86_CPU(cs);
5692 CPUX86State *env = &cpu->env;
5693
5694 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5695 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5696 : bfd_mach_i386_i8086);
5697 info->print_insn = print_insn_i386;
5698
5699 info->cap_arch = CS_ARCH_X86;
5700 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5701 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5702 : CS_MODE_16);
5703 info->cap_insn_unit = 1;
5704 info->cap_insn_split = 8;
5705 }
5706
/*
 * Recompute the derived hidden-flag (hflags) bits from the current CPU
 * state: CPL from SS.DPL, PE/MP/EM/TS from CR0, TF/VM/IOPL from EFLAGS,
 * OSFXSR from CR4, LMA from EFER, and the CS32/SS32/CS64/ADDSEG segment
 * shape flags.  Bits not in HFLAG_COPY_MASK are rebuilt from scratch.
 */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        /* 64-bit code segment: force 32-bit stack/code flags and CS64 */
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            /* ADDSEG only needed when any data segment base is nonzero */
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}
5748
/* QOM properties exposed on every x86 CPU object (-cpu ...,prop=val). */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* System emulation: topology IDs start unset and are assigned later. */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments (hv-*), all disabled by default. */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
    DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
    DEFINE_PROP_BOOL("hv-tlbflush", X86CPU, hyperv_tlbflush, false),
    DEFINE_PROP_BOOL("hv-evmcs", X86CPU, hyperv_evmcs, false),
    DEFINE_PROP_BOOL("hv-ipi", X86CPU, hyperv_ipi, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX = "not set by the user"; resolved during realize. */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
                     true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_END_OF_LIST()
};
5826
/*
 * Class init shared by all x86 CPU types: wires the generic CPUClass
 * and DeviceClass hooks to their x86 implementations and attaches the
 * x86 property list.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain realize/unrealize so the parent implementations still run. */
    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    /* Save the parent reset before overriding it. */
    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* System emulation only: memory mapping, ELF notes and migration. */
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 66;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 50;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
#endif
    cc->disas_set_info = x86_disas_set_info;

    /* x86 CPUs can be hot-plugged via -device / device_add. */
    dc->user_creatable = true;
}
5890
5891 static const TypeInfo x86_cpu_type_info = {
5892 .name = TYPE_X86_CPU,
5893 .parent = TYPE_CPU,
5894 .instance_size = sizeof(X86CPU),
5895 .instance_init = x86_cpu_initfn,
5896 .abstract = true,
5897 .class_size = sizeof(X86CPUClass),
5898 .class_init = x86_cpu_common_class_init,
5899 };
5900
5901
5902 /* "base" CPU model, used by query-cpu-model-expansion */
5903 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
5904 {
5905 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5906
5907 xcc->static_model = true;
5908 xcc->migration_safe = true;
5909 xcc->model_description = "base CPU model type with no features enabled";
5910 xcc->ordering = 8;
5911 }
5912
5913 static const TypeInfo x86_base_cpu_type_info = {
5914 .name = X86_CPU_TYPE_NAME("base"),
5915 .parent = TYPE_X86_CPU,
5916 .class_init = x86_cpu_base_class_init,
5917 };
5918
5919 static void x86_cpu_register_types(void)
5920 {
5921 int i;
5922
5923 type_register_static(&x86_cpu_type_info);
5924 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5925 x86_register_cpudef_type(&builtin_x86_defs[i]);
5926 }
5927 type_register_static(&max_x86_cpu_type_info);
5928 type_register_static(&x86_base_cpu_type_info);
5929 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5930 type_register_static(&host_x86_cpu_type_info);
5931 #endif
5932 }
5933
5934 type_init(x86_cpu_register_types)