]> git.proxmox.com Git - mirror_qemu.git/blob - target/i386/cpu.c
i386/kvm: expose HV_CPUID_ENLIGHTMENT_INFO.EAX and HV_CPUID_NESTED_FEATURES.EAX as...
[mirror_qemu.git] / target / i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24
25 #include "cpu.h"
26 #include "exec/exec-all.h"
27 #include "sysemu/kvm.h"
28 #include "sysemu/hvf.h"
29 #include "sysemu/cpus.h"
30 #include "kvm_i386.h"
31 #include "sev_i386.h"
32
33 #include "qemu/error-report.h"
34 #include "qemu/option.h"
35 #include "qemu/config-file.h"
36 #include "qapi/error.h"
37 #include "qapi/qapi-visit-misc.h"
38 #include "qapi/qapi-visit-run-state.h"
39 #include "qapi/qmp/qdict.h"
40 #include "qapi/qmp/qerror.h"
41 #include "qapi/visitor.h"
42 #include "qom/qom-qobject.h"
43 #include "sysemu/arch_init.h"
44
45 #include "standard-headers/asm-x86/kvm_para.h"
46
47 #include "sysemu/sysemu.h"
48 #include "hw/qdev-properties.h"
49 #include "hw/i386/topology.h"
50 #ifndef CONFIG_USER_ONLY
51 #include "exec/address-spaces.h"
52 #include "hw/hw.h"
53 #include "hw/xen/xen.h"
54 #include "hw/i386/apic_internal.h"
55 #endif
56
57 #include "disas/capstone.h"
58
/* Helpers for building CPUID[2] descriptors: */

/*
 * Cache geometry that one CPUID-leaf-2 one-byte descriptor value
 * stands for; indexed by descriptor value in the table below.
 */
struct CPUID2CacheDescriptorInfo {
    enum CacheType type;    /* data / instruction / unified */
    int level;              /* cache level: 1, 2 or 3 */
    int size;               /* total cache size in bytes */
    int line_size;          /* cache line size in bytes */
    int associativity;      /* number of ways */
};
68
/*
 * Known CPUID 2 cache descriptors.
 * From Intel SDM Volume 2A, CPUID instruction
 */
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
    [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 2, .line_size = 32, },
    [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
               .associativity = 6, .line_size = 64, },
    [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x22, 0x23 are not included
     */
    [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 16, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x25, 0x20 are not included
     */
    [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 32, },
    [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 32, },
    [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 4, .line_size = 64, },
    [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    /* Descriptor 0x49 depends on CPU family/model, so it is not included */
    [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 24, .line_size = 64, },
    [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
     */
    [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 2, .line_size = 64, },
    [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 64, },
    [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8, .line_size = 32, },
    [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 32, },
    [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4, .line_size = 64, },
    [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4, .line_size = 64, },
    [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8, .line_size = 64, },
    [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 8, .line_size = 64, },
    /* NOTE: 1.5 * MiB is a double expression; it converts exactly to 1536 KiB */
    [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
               .associativity = 24, .line_size = 64, },
};
193
194 /*
195 * "CPUID leaf 2 does not report cache descriptor information,
196 * use CPUID leaf 4 to query cache parameters"
197 */
198 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
199
200 /*
201 * Return a CPUID 2 cache descriptor for a given cache.
202 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
203 */
204 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
205 {
206 int i;
207
208 assert(cache->size > 0);
209 assert(cache->level > 0);
210 assert(cache->line_size > 0);
211 assert(cache->associativity > 0);
212 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
213 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
214 if (d->level == cache->level && d->type == cache->type &&
215 d->size == cache->size && d->line_size == cache->line_size &&
216 d->associativity == cache->associativity) {
217 return i;
218 }
219 }
220
221 return CACHE_DESCRIPTOR_UNAVAILABLE;
222 }
223
/* CPUID Leaf 4 constants: */

/* EAX: */
#define CACHE_TYPE_D 1
#define CACHE_TYPE_I 2
#define CACHE_TYPE_UNIFIED 3

/*
 * Cache level goes in EAX bits 7:5.  The argument is parenthesized so
 * that expression arguments (e.g. CACHE_LEVEL(a | b)) bind correctly;
 * the unparenthesized form would expand to a | (b << 5).
 */
#define CACHE_LEVEL(l) ((l) << 5)

#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING (1 << 0)
#define CACHE_INCLUSIVE (1 << 1)
#define CACHE_COMPLEX_IDX (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
                       ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)
246
/* Encode cache info for CPUID[4] */
static void encode_cache_cpuid4(CPUCacheInfo *cache,
                                int num_apic_ids, int num_cores,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
{
    /* The cache geometry must be internally consistent before encoding */
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    assert(num_apic_ids > 0);
    /*
     * EAX: type (bits 4:0), level (7:5), self-init flag (8),
     * APIC IDs sharing this cache minus one (25:14),
     * cores per package minus one (31:26)
     */
    *eax = CACHE_TYPE(cache->type) |
           CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
           ((num_cores - 1) << 26) |
           ((num_apic_ids - 1) << 14);

    /* EBX: line size (11:0), partitions (21:12), ways (31:22) — all minus one */
    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    /* ECX: number of sets minus one */
    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    /* EDX: WBINVD sharing, inclusivity and complex-indexing flags */
    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
279
280 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
281 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
282 {
283 assert(cache->size % 1024 == 0);
284 assert(cache->lines_per_tag > 0);
285 assert(cache->associativity > 0);
286 assert(cache->line_size > 0);
287 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
288 (cache->lines_per_tag << 8) | (cache->line_size);
289 }
290
#define ASSOC_FULL 0xFF

/*
 * AMD associativity encoding used on CPUID Leaf 0x80000006.
 * Every use of the argument is parenthesized for macro hygiene, so an
 * expression argument (e.g. a ternary) cannot misparse against the
 * chained conditional operators in the expansion.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a)  : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
306
307 /*
308 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
309 * @l3 can be NULL.
310 */
311 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
312 CPUCacheInfo *l3,
313 uint32_t *ecx, uint32_t *edx)
314 {
315 assert(l2->size % 1024 == 0);
316 assert(l2->associativity > 0);
317 assert(l2->lines_per_tag > 0);
318 assert(l2->line_size > 0);
319 *ecx = ((l2->size / 1024) << 16) |
320 (AMD_ENC_ASSOC(l2->associativity) << 12) |
321 (l2->lines_per_tag << 8) | (l2->line_size);
322
323 if (l3) {
324 assert(l3->size % (512 * 1024) == 0);
325 assert(l3->associativity > 0);
326 assert(l3->lines_per_tag > 0);
327 assert(l3->line_size > 0);
328 *edx = ((l3->size / (512 * 1024)) << 18) |
329 (AMD_ENC_ASSOC(l3->associativity) << 12) |
330 (l3->lines_per_tag << 8) | (l3->line_size);
331 } else {
332 *edx = 0;
333 }
334 }
335
/*
 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
 * Please refer to the AMD64 Architecture Programmer's Manual Volume 3.
 * Define the constants to build the cpu topology. Right now, TOPOEXT
 * feature is enabled only on EPYC. So, these constants are based on
 * EPYC supported configurations. We may need to handle the cases if
 * these values change in future.
 */
/* Maximum core complexes in a node */
#define MAX_CCX 2
/* Maximum cores in a core complex */
#define MAX_CORES_IN_CCX 4
/* Maximum cores in a node */
#define MAX_CORES_IN_NODE 8
/* Maximum nodes in a socket */
#define MAX_NODES_PER_SOCKET 4

/*
 * Figure out the number of nodes required to build this config.
 * Max cores in a node is 8
 */
static int nodes_in_socket(int nr_cores)
{
    /* Round up: a partially filled node still counts as a whole node */
    int num_nodes = (nr_cores + MAX_CORES_IN_NODE - 1) / MAX_CORES_IN_NODE;

    /* Hardware does not support a 3-node config; use 4 nodes in that case */
    return (num_nodes == 3) ? 4 : num_nodes;
}
366
367 /*
368 * Decide the number of cores in a core complex with the given nr_cores using
369 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
370 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
371 * L3 cache is shared across all cores in a core complex. So, this will also
372 * tell us how many cores are sharing the L3 cache.
373 */
374 static int cores_in_core_complex(int nr_cores)
375 {
376 int nodes;
377
378 /* Check if we can fit all the cores in one core complex */
379 if (nr_cores <= MAX_CORES_IN_CCX) {
380 return nr_cores;
381 }
382 /* Get the number of nodes required to build this config */
383 nodes = nodes_in_socket(nr_cores);
384
385 /*
386 * Divide the cores accros all the core complexes
387 * Return rounded up value
388 */
389 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
390 }
391
/* Encode cache info for CPUID[8000001D] */
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    uint32_t l3_cores;
    /* The cache geometry must be internally consistent before encoding */
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);

    /*
     * EAX[25:14]: number of logical processors sharing the cache, minus one.
     * L3 is shared among multiple cores (all cores of a core complex);
     * L1/L2 only among the SMT threads of a single core.
     */
    if (cache->level == 3) {
        l3_cores = cores_in_core_complex(cs->nr_cores);
        *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
    } else {
        *eax |= ((cs->nr_threads - 1) << 14);
    }

    /* EBX: line size, partitions and ways, each field minus one */
    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    /* ECX: number of sets minus one */
    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    /* EDX: WBINVD sharing, inclusivity and complex-indexing flags */
    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
428
/*
 * Data structure to hold the configuration info for a given core index.
 * Filled in by build_core_topology().
 */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology
     * This can be 0,1,2,3 with max 4 cores in a core complex
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};
443
444 /*
445 * Build the configuration closely match the EPYC hardware. Using the EPYC
446 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
447 * right now. This could change in future.
448 * nr_cores : Total number of cores in the config
449 * core_id : Core index of the current CPU
450 * topo : Data structure to hold all the config info for this core index
451 */
452 static void build_core_topology(int nr_cores, int core_id,
453 struct core_topology *topo)
454 {
455 int nodes, cores_in_ccx;
456
457 /* First get the number of nodes required */
458 nodes = nodes_in_socket(nr_cores);
459
460 cores_in_ccx = cores_in_core_complex(nr_cores);
461
462 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
463 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
464 topo->core_id = core_id % cores_in_ccx;
465 topo->num_nodes = nodes;
466 }
467
/*
 * Encode topology info for CPUID[8000001E] (extended APIC ID, core id,
 * node id).  Despite sitting among the cache encoders, this leaf carries
 * topology, not cache geometry.
 */
static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
{
    struct core_topology topo = {0};
    unsigned long nodes;
    int shift;

    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
    /* EAX: extended APIC ID of this vCPU */
    *eax = cpu->apic_id;
    /*
     * CPUID_Fn8000001E_EBX
     * 31:16 Reserved
     * 15:8  Threads per core (The number of threads per core is
     *       Threads per core + 1)
     *  7:0  Core id (see bit decoding below)
     *       SMT:
     *           4:3 node id
     *             2 Core complex id
     *           1:0 Core id
     *       Non SMT:
     *           5:4 node id
     *             3 Core complex id
     *           1:0 Core id
     */
    if (cs->nr_threads - 1) {
        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
               (topo.ccx_id << 2) | topo.core_id;
    } else {
        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
    }
    /*
     * CPUID_Fn8000001E_ECX
     * 31:11 Reserved
     * 10:8  Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0  Node id (see bit decoding below)
     *           2 Socket id
     *         1:0 Node id
     */
    if (topo.num_nodes <= 4) {
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
               topo.node_id;
    } else {
        /*
         * Node id fix up. Actual hardware supports up to 4 nodes. But with
         * more than 32 cores, we may end up with more than 4 nodes.
         * Node id is a combination of socket id and node id. Only requirement
         * here is that this number should be unique across the system.
         * Shift the socket id to accommodate more nodes. We dont expect both
         * socket id and node id to be big number at the same time. This is not
         * an ideal config but we need to support it. Max nodes we can have
         * is 32 (255/8) with 8 cores per node and 255 max cores. We only need
         * 5 bits for nodes. Find the left most set bit to represent the total
         * number of nodes. find_last_bit returns last set bit(0 based). Left
         * shift(+1) the socket id to represent all the nodes.
         */
        nodes = topo.num_nodes - 1;
        shift = find_last_bit(&nodes, 8);
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
               topo.node_id;
    }
    /* EDX: reserved */
    *edx = 0;
}
532
/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};


/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,
    .partitions = 1,
};

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};
642
/*
 * TLB definitions: hardcoded associativity and entry counts.
 * NOTE(review): by naming these appear to feed the AMD TLB fields of
 * CPUID[0x80000005]/[0x80000006] — confirm against the consumers below.
 */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
664
/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF 0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX 0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX 0x7
/*
 * Generated packets which contain IP payloads have LIP values.
 * Use an unsigned constant: (1 << 31) would left-shift into the sign
 * bit of a signed int, which is undefined behavior (C11 6.5.7).
 */
#define INTEL_PT_IP_LIP (1u << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
693
694 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
695 uint32_t vendor2, uint32_t vendor3)
696 {
697 int i;
698 for (i = 0; i < 4; i++) {
699 dst[i] = vendor1 >> (8 * i);
700 dst[i + 4] = vendor2 >> (8 * i);
701 dst[i + 8] = vendor3 >> (8 * i);
702 }
703 dst[CPUID_VENDOR_SZ] = '\0';
704 }
705
/* Feature sets of the classic built-in CPU models (CPUID[1].EDX bits): */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* Feature bits the TCG emulator can actually provide, per feature word: */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
/* partly implemented:
   CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
/* missing:
   CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
/* missing:
   CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
   CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
   CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
   CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
   CPUID_EXT_F16C, CPUID_EXT_RDRAND */

/* 64-bit-only CPUID[0x80000001].EDX bits, empty on 32-bit-only builds */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES CPUID_SVM_NPT
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
/* missing:
   CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
   CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
   CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
          /* CPUID_7_0_ECX_OSPKE is dynamic */ \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
/* missing:
   CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
772
/* How a feature word is enumerated by the hardware: a CPUID leaf or an MSR */
typedef enum FeatureWordType {
   CPUID_FEATURE_WORD,
   MSR_FEATURE_WORD,
} FeatureWordType;

/* Static description of one 32-bit feature word (names, source, TCG caps) */
typedef struct FeatureWordInfo {
    FeatureWordType type;
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];   /* one name per bit; NULL = unnamed bit */
    union {
        /* If type==CPUID_FEATURE_WORD */
        struct {
            uint32_t eax;   /* Input EAX for CPUID */
            bool needs_ecx; /* CPUID instruction uses ECX as input */
            uint32_t ecx;   /* Input ECX value for CPUID */
            int reg;        /* output register (R_* constant) */
        } cpuid;
        /* If type==MSR_FEATURE_WORD */
        struct {
            uint32_t index;
            struct {   /*CPUID that enumerate this MSR*/
                FeatureWord cpuid_class;
                uint32_t    cpuid_flag;
            } cpuid_dep;
        } msr;
    };
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
} FeatureWordInfo;
809
810 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
811 [FEAT_1_EDX] = {
812 .type = CPUID_FEATURE_WORD,
813 .feat_names = {
814 "fpu", "vme", "de", "pse",
815 "tsc", "msr", "pae", "mce",
816 "cx8", "apic", NULL, "sep",
817 "mtrr", "pge", "mca", "cmov",
818 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
819 NULL, "ds" /* Intel dts */, "acpi", "mmx",
820 "fxsr", "sse", "sse2", "ss",
821 "ht" /* Intel htt */, "tm", "ia64", "pbe",
822 },
823 .cpuid = {.eax = 1, .reg = R_EDX, },
824 .tcg_features = TCG_FEATURES,
825 },
826 [FEAT_1_ECX] = {
827 .type = CPUID_FEATURE_WORD,
828 .feat_names = {
829 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
830 "ds-cpl", "vmx", "smx", "est",
831 "tm2", "ssse3", "cid", NULL,
832 "fma", "cx16", "xtpr", "pdcm",
833 NULL, "pcid", "dca", "sse4.1",
834 "sse4.2", "x2apic", "movbe", "popcnt",
835 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
836 "avx", "f16c", "rdrand", "hypervisor",
837 },
838 .cpuid = { .eax = 1, .reg = R_ECX, },
839 .tcg_features = TCG_EXT_FEATURES,
840 },
841 /* Feature names that are already defined on feature_name[] but
842 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
843 * names on feat_names below. They are copied automatically
844 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
845 */
846 [FEAT_8000_0001_EDX] = {
847 .type = CPUID_FEATURE_WORD,
848 .feat_names = {
849 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
850 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
851 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
852 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
853 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
854 "nx", NULL, "mmxext", NULL /* mmx */,
855 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
856 NULL, "lm", "3dnowext", "3dnow",
857 },
858 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
859 .tcg_features = TCG_EXT2_FEATURES,
860 },
861 [FEAT_8000_0001_ECX] = {
862 .type = CPUID_FEATURE_WORD,
863 .feat_names = {
864 "lahf-lm", "cmp-legacy", "svm", "extapic",
865 "cr8legacy", "abm", "sse4a", "misalignsse",
866 "3dnowprefetch", "osvw", "ibs", "xop",
867 "skinit", "wdt", NULL, "lwp",
868 "fma4", "tce", NULL, "nodeid-msr",
869 NULL, "tbm", "topoext", "perfctr-core",
870 "perfctr-nb", NULL, NULL, NULL,
871 NULL, NULL, NULL, NULL,
872 },
873 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
874 .tcg_features = TCG_EXT3_FEATURES,
875 /*
876 * TOPOEXT is always allowed but can't be enabled blindly by
877 * "-cpu host", as it requires consistent cache topology info
878 * to be provided so it doesn't confuse guests.
879 */
880 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
881 },
882 [FEAT_C000_0001_EDX] = {
883 .type = CPUID_FEATURE_WORD,
884 .feat_names = {
885 NULL, NULL, "xstore", "xstore-en",
886 NULL, NULL, "xcrypt", "xcrypt-en",
887 "ace2", "ace2-en", "phe", "phe-en",
888 "pmm", "pmm-en", NULL, NULL,
889 NULL, NULL, NULL, NULL,
890 NULL, NULL, NULL, NULL,
891 NULL, NULL, NULL, NULL,
892 NULL, NULL, NULL, NULL,
893 },
894 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
895 .tcg_features = TCG_EXT4_FEATURES,
896 },
897 [FEAT_KVM] = {
898 .type = CPUID_FEATURE_WORD,
899 .feat_names = {
900 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
901 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
902 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
903 NULL, NULL, NULL, NULL,
904 NULL, NULL, NULL, NULL,
905 NULL, NULL, NULL, NULL,
906 "kvmclock-stable-bit", NULL, NULL, NULL,
907 NULL, NULL, NULL, NULL,
908 },
909 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
910 .tcg_features = TCG_KVM_FEATURES,
911 },
912 [FEAT_KVM_HINTS] = {
913 .type = CPUID_FEATURE_WORD,
914 .feat_names = {
915 "kvm-hint-dedicated", NULL, NULL, NULL,
916 NULL, NULL, NULL, NULL,
917 NULL, NULL, NULL, NULL,
918 NULL, NULL, NULL, NULL,
919 NULL, NULL, NULL, NULL,
920 NULL, NULL, NULL, NULL,
921 NULL, NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 },
924 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
925 .tcg_features = TCG_KVM_FEATURES,
926 /*
927 * KVM hints aren't auto-enabled by -cpu host, they need to be
928 * explicitly enabled in the command-line.
929 */
930 .no_autoenable_flags = ~0U,
931 },
932 [FEAT_HYPERV_EAX] = {
933 .type = CPUID_FEATURE_WORD,
934 .feat_names = {
935 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
936 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
937 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
938 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
939 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
940 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
941 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
942 NULL, NULL,
943 NULL, NULL, NULL, NULL,
944 NULL, NULL, NULL, NULL,
945 NULL, NULL, NULL, NULL,
946 NULL, NULL, NULL, NULL,
947 },
948 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
949 },
950 [FEAT_HYPERV_EBX] = {
951 .type = CPUID_FEATURE_WORD,
952 .feat_names = {
953 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
954 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
955 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
956 NULL /* hv_create_port */, NULL /* hv_connect_port */,
957 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
958 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
959 NULL, NULL,
960 NULL, NULL, NULL, NULL,
961 NULL, NULL, NULL, NULL,
962 NULL, NULL, NULL, NULL,
963 NULL, NULL, NULL, NULL,
964 },
965 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
966 },
967 [FEAT_HYPERV_EDX] = {
968 .type = CPUID_FEATURE_WORD,
969 .feat_names = {
970 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
971 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
972 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
973 NULL, NULL,
974 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
975 NULL, NULL, NULL, NULL,
976 NULL, NULL, NULL, NULL,
977 NULL, NULL, NULL, NULL,
978 NULL, NULL, NULL, NULL,
979 NULL, NULL, NULL, NULL,
980 },
981 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
982 },
983 [FEAT_HV_RECOMM_EAX] = {
984 .type = CPUID_FEATURE_WORD,
985 .feat_names = {
986 NULL /* hv_recommend_pv_as_switch */,
987 NULL /* hv_recommend_pv_tlbflush_local */,
988 NULL /* hv_recommend_pv_tlbflush_remote */,
989 NULL /* hv_recommend_msr_apic_access */,
990 NULL /* hv_recommend_msr_reset */,
991 NULL /* hv_recommend_relaxed_timing */,
992 NULL /* hv_recommend_dma_remapping */,
993 NULL /* hv_recommend_int_remapping */,
994 NULL /* hv_recommend_x2apic_msrs */,
995 NULL /* hv_recommend_autoeoi_deprecation */,
996 NULL /* hv_recommend_pv_ipi */,
997 NULL /* hv_recommend_ex_hypercalls */,
998 NULL /* hv_hypervisor_is_nested */,
999 NULL /* hv_recommend_int_mbec */,
1000 NULL /* hv_recommend_evmcs */,
1001 NULL,
1002 NULL, NULL, NULL, NULL,
1003 NULL, NULL, NULL, NULL,
1004 NULL, NULL, NULL, NULL,
1005 NULL, NULL, NULL, NULL,
1006 },
1007 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
1008 },
1009 [FEAT_HV_NESTED_EAX] = {
1010 .type = CPUID_FEATURE_WORD,
1011 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
1012 },
1013 [FEAT_SVM] = {
1014 .type = CPUID_FEATURE_WORD,
1015 .feat_names = {
1016 "npt", "lbrv", "svm-lock", "nrip-save",
1017 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1018 NULL, NULL, "pause-filter", NULL,
1019 "pfthreshold", NULL, NULL, NULL,
1020 NULL, NULL, NULL, NULL,
1021 NULL, NULL, NULL, NULL,
1022 NULL, NULL, NULL, NULL,
1023 NULL, NULL, NULL, NULL,
1024 },
1025 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
1026 .tcg_features = TCG_SVM_FEATURES,
1027 },
1028 [FEAT_7_0_EBX] = {
1029 .type = CPUID_FEATURE_WORD,
1030 .feat_names = {
1031 "fsgsbase", "tsc-adjust", NULL, "bmi1",
1032 "hle", "avx2", NULL, "smep",
1033 "bmi2", "erms", "invpcid", "rtm",
1034 NULL, NULL, "mpx", NULL,
1035 "avx512f", "avx512dq", "rdseed", "adx",
1036 "smap", "avx512ifma", "pcommit", "clflushopt",
1037 "clwb", "intel-pt", "avx512pf", "avx512er",
1038 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1039 },
1040 .cpuid = {
1041 .eax = 7,
1042 .needs_ecx = true, .ecx = 0,
1043 .reg = R_EBX,
1044 },
1045 .tcg_features = TCG_7_0_EBX_FEATURES,
1046 },
1047 [FEAT_7_0_ECX] = {
1048 .type = CPUID_FEATURE_WORD,
1049 .feat_names = {
1050 NULL, "avx512vbmi", "umip", "pku",
1051 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
1052 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1053 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
1054 "la57", NULL, NULL, NULL,
1055 NULL, NULL, "rdpid", NULL,
1056 NULL, "cldemote", NULL, "movdiri",
1057 "movdir64b", NULL, NULL, NULL,
1058 },
1059 .cpuid = {
1060 .eax = 7,
1061 .needs_ecx = true, .ecx = 0,
1062 .reg = R_ECX,
1063 },
1064 .tcg_features = TCG_7_0_ECX_FEATURES,
1065 },
1066 [FEAT_7_0_EDX] = {
1067 .type = CPUID_FEATURE_WORD,
1068 .feat_names = {
1069 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1070 NULL, NULL, NULL, NULL,
1071 NULL, NULL, NULL, NULL,
1072 NULL, NULL, NULL, NULL,
1073 NULL, NULL, "pconfig", NULL,
1074 NULL, NULL, NULL, NULL,
1075 NULL, NULL, "spec-ctrl", "stibp",
1076 NULL, "arch-capabilities", NULL, "ssbd",
1077 },
1078 .cpuid = {
1079 .eax = 7,
1080 .needs_ecx = true, .ecx = 0,
1081 .reg = R_EDX,
1082 },
1083 .tcg_features = TCG_7_0_EDX_FEATURES,
1084 .unmigratable_flags = CPUID_7_0_EDX_ARCH_CAPABILITIES,
1085 },
1086 [FEAT_8000_0007_EDX] = {
1087 .type = CPUID_FEATURE_WORD,
1088 .feat_names = {
1089 NULL, NULL, NULL, NULL,
1090 NULL, NULL, NULL, NULL,
1091 "invtsc", NULL, NULL, NULL,
1092 NULL, NULL, NULL, NULL,
1093 NULL, NULL, NULL, NULL,
1094 NULL, NULL, NULL, NULL,
1095 NULL, NULL, NULL, NULL,
1096 NULL, NULL, NULL, NULL,
1097 },
1098 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1099 .tcg_features = TCG_APM_FEATURES,
1100 .unmigratable_flags = CPUID_APM_INVTSC,
1101 },
1102 [FEAT_8000_0008_EBX] = {
1103 .type = CPUID_FEATURE_WORD,
1104 .feat_names = {
1105 NULL, NULL, NULL, NULL,
1106 NULL, NULL, NULL, NULL,
1107 NULL, "wbnoinvd", NULL, NULL,
1108 "ibpb", NULL, NULL, NULL,
1109 NULL, NULL, NULL, NULL,
1110 NULL, NULL, NULL, NULL,
1111 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1112 NULL, NULL, NULL, NULL,
1113 },
1114 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1115 .tcg_features = 0,
1116 .unmigratable_flags = 0,
1117 },
1118 [FEAT_XSAVE] = {
1119 .type = CPUID_FEATURE_WORD,
1120 .feat_names = {
1121 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1122 NULL, NULL, NULL, NULL,
1123 NULL, NULL, NULL, NULL,
1124 NULL, NULL, NULL, NULL,
1125 NULL, NULL, NULL, NULL,
1126 NULL, NULL, NULL, NULL,
1127 NULL, NULL, NULL, NULL,
1128 NULL, NULL, NULL, NULL,
1129 },
1130 .cpuid = {
1131 .eax = 0xd,
1132 .needs_ecx = true, .ecx = 1,
1133 .reg = R_EAX,
1134 },
1135 .tcg_features = TCG_XSAVE_FEATURES,
1136 },
1137 [FEAT_6_EAX] = {
1138 .type = CPUID_FEATURE_WORD,
1139 .feat_names = {
1140 NULL, NULL, "arat", NULL,
1141 NULL, NULL, NULL, NULL,
1142 NULL, NULL, NULL, NULL,
1143 NULL, NULL, NULL, NULL,
1144 NULL, NULL, NULL, NULL,
1145 NULL, NULL, NULL, NULL,
1146 NULL, NULL, NULL, NULL,
1147 NULL, NULL, NULL, NULL,
1148 },
1149 .cpuid = { .eax = 6, .reg = R_EAX, },
1150 .tcg_features = TCG_6_EAX_FEATURES,
1151 },
1152 [FEAT_XSAVE_COMP_LO] = {
1153 .type = CPUID_FEATURE_WORD,
1154 .cpuid = {
1155 .eax = 0xD,
1156 .needs_ecx = true, .ecx = 0,
1157 .reg = R_EAX,
1158 },
1159 .tcg_features = ~0U,
1160 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1161 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1162 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1163 XSTATE_PKRU_MASK,
1164 },
1165 [FEAT_XSAVE_COMP_HI] = {
1166 .type = CPUID_FEATURE_WORD,
1167 .cpuid = {
1168 .eax = 0xD,
1169 .needs_ecx = true, .ecx = 0,
1170 .reg = R_EDX,
1171 },
1172 .tcg_features = ~0U,
1173 },
/* Below are MSR-exposed features (reported via MSRs rather than CPUID) */
1175 [FEAT_ARCH_CAPABILITIES] = {
1176 .type = MSR_FEATURE_WORD,
1177 .feat_names = {
1178 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1179 "ssb-no", NULL, NULL, NULL,
1180 NULL, NULL, NULL, NULL,
1181 NULL, NULL, NULL, NULL,
1182 NULL, NULL, NULL, NULL,
1183 NULL, NULL, NULL, NULL,
1184 NULL, NULL, NULL, NULL,
1185 NULL, NULL, NULL, NULL,
1186 },
1187 .msr = {
1188 .index = MSR_IA32_ARCH_CAPABILITIES,
1189 .cpuid_dep = {
1190 FEAT_7_0_EDX,
1191 CPUID_7_0_EDX_ARCH_CAPABILITIES
1192 }
1193 },
1194 },
1195 };
1196
/* Maps a 32-bit x86 register index to its printable name and QAPI enum. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value for this register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
1203
/*
 * Table of the 32-bit general-purpose registers, indexed by R_* constants.
 * REGISTER() expands to a designated initializer pairing the stringified
 * register name with the matching X86_CPU_REGISTER32_* QAPI enum value.
 */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
1217
/*
 * Describes one XSAVE state component: the feature word/bit that enables
 * it, and its offset and size within the XSAVE area layout.
 */
typedef struct ExtSaveArea {
    uint32_t feature, bits;   /* FeatureWord index and bitmask within it */
    uint32_t offset, size;    /* location of the component's save area */
} ExtSaveArea;
1222
/*
 * XSAVE state components, indexed by XSTATE_*_BIT.  Each entry records the
 * CPUID feature bit that enables the component and where the component
 * lives inside QEMU's X86XSaveArea layout.
 */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
1267
1268 static uint32_t xsave_area_size(uint64_t mask)
1269 {
1270 int i;
1271 uint64_t ret = 0;
1272
1273 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1274 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1275 if ((mask >> i) & 1) {
1276 ret = MAX(ret, esa->offset + esa->size);
1277 }
1278 }
1279 return ret;
1280 }
1281
/* True when the current accelerator (KVM or HVF) bases guest CPUID on the
 * host's CPUID data. */
static inline bool accel_uses_host_cpuid(void)
{
    if (kvm_enabled()) {
        return true;
    }
    return hvf_enabled();
}
1286
1287 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1288 {
1289 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1290 cpu->env.features[FEAT_XSAVE_COMP_LO];
1291 }
1292
1293 const char *get_register_name_32(unsigned int reg)
1294 {
1295 if (reg >= CPU_NB_REGS32) {
1296 return NULL;
1297 }
1298 return x86_reg_info_32[reg].name;
1299 }
1300
1301 /*
1302 * Returns the set of feature flags that are supported and migratable by
1303 * QEMU, for a given FeatureWord.
1304 */
1305 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1306 {
1307 FeatureWordInfo *wi = &feature_word_info[w];
1308 uint32_t r = 0;
1309 int i;
1310
1311 for (i = 0; i < 32; i++) {
1312 uint32_t f = 1U << i;
1313
1314 /* If the feature name is known, it is implicitly considered migratable,
1315 * unless it is explicitly set in unmigratable_flags */
1316 if ((wi->migratable_flags & f) ||
1317 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1318 r |= f;
1319 }
1320 }
1321 return r;
1322 }
1323
/*
 * Execute the CPUID instruction on the host with leaf @function and
 * subleaf @count, storing the resulting register values through the
 * eax/ebx/ecx/edx pointers.  Any output pointer may be NULL to discard
 * that register.  Aborts on non-x86 hosts, where CPUID does not exist.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /*
     * NOTE(review): on i386 all GPRs are saved/restored with pusha/popa
     * and the results written out through %esi (operand %2) instead of
     * using output constraints — presumably so %ebx (the PIC register)
     * is never listed as clobbered; confirm before restructuring.
     */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
1357
/*
 * Query the host CPU's vendor string, family, model and stepping via
 * CPUID leaves 0 and 1.  @vendor is always filled in; the family/model/
 * stepping pointers may be NULL when the caller does not need them.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    /* Leaf 0: vendor string, assembled in the order EBX, EDX, ECX. */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    /* Leaf 1: EAX packs stepping, model, family and extended fields. */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family != NULL) {
        /* base family plus extended family */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model != NULL) {
        /* base model with extended model shifted into the high nibble */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping != NULL) {
        *stepping = eax & 0x0F;
    }
}
1376
1377 /* CPU class name definitions: */
1378
/*
 * Build the QOM type name for CPU model @model_name.
 * The caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    char *typename = g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);

    return typename;
}
1386
1387 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1388 {
1389 ObjectClass *oc;
1390 char *typename = x86_cpu_type_name(cpu_model);
1391 oc = object_class_by_name(typename);
1392 g_free(typename);
1393 return oc;
1394 }
1395
1396 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1397 {
1398 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1399 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1400 return g_strndup(class_name,
1401 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1402 }
1403
/* Definition of one built-in CPU model (see builtin_x86_defs). */
struct X86CPUDefinition {
    const char *name;          /* CPU model name, e.g. "qemu64" */
    uint32_t level;            /* CPUID level (basic leaves) */
    uint32_t xlevel;           /* CPUID extended level (0x8000xxxx leaves) */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features; /* default feature bits per FeatureWord */
    const char *model_id;      /* human-readable model name string */
    CPUCaches *cache_info;     /* optional cache topology; may be NULL */
};
1417
/*
 * Cache hierarchy advertised for the AMD EPYC CPU model (referenced by a
 * builtin_x86_defs entry via .cache_info).
 */
static CPUCaches epyc_cache_info = {
    /* 32 KiB 8-way L1 data cache */
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    /* 64 KiB 4-way L1 instruction cache */
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    /* 512 KiB 8-way unified L2 cache */
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    /* 8 MiB 16-way unified L3 cache */
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 8192,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};
1467
1468 static X86CPUDefinition builtin_x86_defs[] = {
1469 {
1470 .name = "qemu64",
1471 .level = 0xd,
1472 .vendor = CPUID_VENDOR_AMD,
1473 .family = 6,
1474 .model = 6,
1475 .stepping = 3,
1476 .features[FEAT_1_EDX] =
1477 PPRO_FEATURES |
1478 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1479 CPUID_PSE36,
1480 .features[FEAT_1_ECX] =
1481 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1482 .features[FEAT_8000_0001_EDX] =
1483 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1484 .features[FEAT_8000_0001_ECX] =
1485 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1486 .xlevel = 0x8000000A,
1487 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1488 },
1489 {
1490 .name = "phenom",
1491 .level = 5,
1492 .vendor = CPUID_VENDOR_AMD,
1493 .family = 16,
1494 .model = 2,
1495 .stepping = 3,
1496 /* Missing: CPUID_HT */
1497 .features[FEAT_1_EDX] =
1498 PPRO_FEATURES |
1499 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1500 CPUID_PSE36 | CPUID_VME,
1501 .features[FEAT_1_ECX] =
1502 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1503 CPUID_EXT_POPCNT,
1504 .features[FEAT_8000_0001_EDX] =
1505 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1506 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1507 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1508 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1509 CPUID_EXT3_CR8LEG,
1510 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1511 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1512 .features[FEAT_8000_0001_ECX] =
1513 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1514 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1515 /* Missing: CPUID_SVM_LBRV */
1516 .features[FEAT_SVM] =
1517 CPUID_SVM_NPT,
1518 .xlevel = 0x8000001A,
1519 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1520 },
1521 {
1522 .name = "core2duo",
1523 .level = 10,
1524 .vendor = CPUID_VENDOR_INTEL,
1525 .family = 6,
1526 .model = 15,
1527 .stepping = 11,
1528 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1529 .features[FEAT_1_EDX] =
1530 PPRO_FEATURES |
1531 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1532 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1533 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1534 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1535 .features[FEAT_1_ECX] =
1536 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1537 CPUID_EXT_CX16,
1538 .features[FEAT_8000_0001_EDX] =
1539 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1540 .features[FEAT_8000_0001_ECX] =
1541 CPUID_EXT3_LAHF_LM,
1542 .xlevel = 0x80000008,
1543 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1544 },
1545 {
1546 .name = "kvm64",
1547 .level = 0xd,
1548 .vendor = CPUID_VENDOR_INTEL,
1549 .family = 15,
1550 .model = 6,
1551 .stepping = 1,
1552 /* Missing: CPUID_HT */
1553 .features[FEAT_1_EDX] =
1554 PPRO_FEATURES | CPUID_VME |
1555 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1556 CPUID_PSE36,
1557 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1558 .features[FEAT_1_ECX] =
1559 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1560 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1561 .features[FEAT_8000_0001_EDX] =
1562 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1563 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1564 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1565 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1566 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1567 .features[FEAT_8000_0001_ECX] =
1568 0,
1569 .xlevel = 0x80000008,
1570 .model_id = "Common KVM processor"
1571 },
1572 {
1573 .name = "qemu32",
1574 .level = 4,
1575 .vendor = CPUID_VENDOR_INTEL,
1576 .family = 6,
1577 .model = 6,
1578 .stepping = 3,
1579 .features[FEAT_1_EDX] =
1580 PPRO_FEATURES,
1581 .features[FEAT_1_ECX] =
1582 CPUID_EXT_SSE3,
1583 .xlevel = 0x80000004,
1584 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1585 },
1586 {
1587 .name = "kvm32",
1588 .level = 5,
1589 .vendor = CPUID_VENDOR_INTEL,
1590 .family = 15,
1591 .model = 6,
1592 .stepping = 1,
1593 .features[FEAT_1_EDX] =
1594 PPRO_FEATURES | CPUID_VME |
1595 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1596 .features[FEAT_1_ECX] =
1597 CPUID_EXT_SSE3,
1598 .features[FEAT_8000_0001_ECX] =
1599 0,
1600 .xlevel = 0x80000008,
1601 .model_id = "Common 32-bit KVM processor"
1602 },
1603 {
1604 .name = "coreduo",
1605 .level = 10,
1606 .vendor = CPUID_VENDOR_INTEL,
1607 .family = 6,
1608 .model = 14,
1609 .stepping = 8,
1610 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1611 .features[FEAT_1_EDX] =
1612 PPRO_FEATURES | CPUID_VME |
1613 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1614 CPUID_SS,
1615 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1616 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1617 .features[FEAT_1_ECX] =
1618 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1619 .features[FEAT_8000_0001_EDX] =
1620 CPUID_EXT2_NX,
1621 .xlevel = 0x80000008,
1622 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1623 },
1624 {
1625 .name = "486",
1626 .level = 1,
1627 .vendor = CPUID_VENDOR_INTEL,
1628 .family = 4,
1629 .model = 8,
1630 .stepping = 0,
1631 .features[FEAT_1_EDX] =
1632 I486_FEATURES,
1633 .xlevel = 0,
1634 .model_id = "",
1635 },
1636 {
1637 .name = "pentium",
1638 .level = 1,
1639 .vendor = CPUID_VENDOR_INTEL,
1640 .family = 5,
1641 .model = 4,
1642 .stepping = 3,
1643 .features[FEAT_1_EDX] =
1644 PENTIUM_FEATURES,
1645 .xlevel = 0,
1646 .model_id = "",
1647 },
1648 {
1649 .name = "pentium2",
1650 .level = 2,
1651 .vendor = CPUID_VENDOR_INTEL,
1652 .family = 6,
1653 .model = 5,
1654 .stepping = 2,
1655 .features[FEAT_1_EDX] =
1656 PENTIUM2_FEATURES,
1657 .xlevel = 0,
1658 .model_id = "",
1659 },
1660 {
1661 .name = "pentium3",
1662 .level = 3,
1663 .vendor = CPUID_VENDOR_INTEL,
1664 .family = 6,
1665 .model = 7,
1666 .stepping = 3,
1667 .features[FEAT_1_EDX] =
1668 PENTIUM3_FEATURES,
1669 .xlevel = 0,
1670 .model_id = "",
1671 },
1672 {
1673 .name = "athlon",
1674 .level = 2,
1675 .vendor = CPUID_VENDOR_AMD,
1676 .family = 6,
1677 .model = 2,
1678 .stepping = 3,
1679 .features[FEAT_1_EDX] =
1680 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1681 CPUID_MCA,
1682 .features[FEAT_8000_0001_EDX] =
1683 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1684 .xlevel = 0x80000008,
1685 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1686 },
1687 {
1688 .name = "n270",
1689 .level = 10,
1690 .vendor = CPUID_VENDOR_INTEL,
1691 .family = 6,
1692 .model = 28,
1693 .stepping = 2,
1694 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1695 .features[FEAT_1_EDX] =
1696 PPRO_FEATURES |
1697 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1698 CPUID_ACPI | CPUID_SS,
1699 /* Some CPUs got no CPUID_SEP */
1700 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1701 * CPUID_EXT_XTPR */
1702 .features[FEAT_1_ECX] =
1703 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1704 CPUID_EXT_MOVBE,
1705 .features[FEAT_8000_0001_EDX] =
1706 CPUID_EXT2_NX,
1707 .features[FEAT_8000_0001_ECX] =
1708 CPUID_EXT3_LAHF_LM,
1709 .xlevel = 0x80000008,
1710 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1711 },
1712 {
1713 .name = "Conroe",
1714 .level = 10,
1715 .vendor = CPUID_VENDOR_INTEL,
1716 .family = 6,
1717 .model = 15,
1718 .stepping = 3,
1719 .features[FEAT_1_EDX] =
1720 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1721 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1722 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1723 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1724 CPUID_DE | CPUID_FP87,
1725 .features[FEAT_1_ECX] =
1726 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1727 .features[FEAT_8000_0001_EDX] =
1728 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1729 .features[FEAT_8000_0001_ECX] =
1730 CPUID_EXT3_LAHF_LM,
1731 .xlevel = 0x80000008,
1732 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1733 },
1734 {
1735 .name = "Penryn",
1736 .level = 10,
1737 .vendor = CPUID_VENDOR_INTEL,
1738 .family = 6,
1739 .model = 23,
1740 .stepping = 3,
1741 .features[FEAT_1_EDX] =
1742 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1743 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1744 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1745 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1746 CPUID_DE | CPUID_FP87,
1747 .features[FEAT_1_ECX] =
1748 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1749 CPUID_EXT_SSE3,
1750 .features[FEAT_8000_0001_EDX] =
1751 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1752 .features[FEAT_8000_0001_ECX] =
1753 CPUID_EXT3_LAHF_LM,
1754 .xlevel = 0x80000008,
1755 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1756 },
1757 {
1758 .name = "Nehalem",
1759 .level = 11,
1760 .vendor = CPUID_VENDOR_INTEL,
1761 .family = 6,
1762 .model = 26,
1763 .stepping = 3,
1764 .features[FEAT_1_EDX] =
1765 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1766 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1767 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1768 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1769 CPUID_DE | CPUID_FP87,
1770 .features[FEAT_1_ECX] =
1771 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1772 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1773 .features[FEAT_8000_0001_EDX] =
1774 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1775 .features[FEAT_8000_0001_ECX] =
1776 CPUID_EXT3_LAHF_LM,
1777 .xlevel = 0x80000008,
1778 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1779 },
1780 {
1781 .name = "Nehalem-IBRS",
1782 .level = 11,
1783 .vendor = CPUID_VENDOR_INTEL,
1784 .family = 6,
1785 .model = 26,
1786 .stepping = 3,
1787 .features[FEAT_1_EDX] =
1788 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1789 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1790 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1791 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1792 CPUID_DE | CPUID_FP87,
1793 .features[FEAT_1_ECX] =
1794 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1795 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1796 .features[FEAT_7_0_EDX] =
1797 CPUID_7_0_EDX_SPEC_CTRL,
1798 .features[FEAT_8000_0001_EDX] =
1799 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1800 .features[FEAT_8000_0001_ECX] =
1801 CPUID_EXT3_LAHF_LM,
1802 .xlevel = 0x80000008,
1803 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1804 },
1805 {
1806 .name = "Westmere",
1807 .level = 11,
1808 .vendor = CPUID_VENDOR_INTEL,
1809 .family = 6,
1810 .model = 44,
1811 .stepping = 1,
1812 .features[FEAT_1_EDX] =
1813 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1814 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1815 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1816 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1817 CPUID_DE | CPUID_FP87,
1818 .features[FEAT_1_ECX] =
1819 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1820 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1821 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1822 .features[FEAT_8000_0001_EDX] =
1823 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1824 .features[FEAT_8000_0001_ECX] =
1825 CPUID_EXT3_LAHF_LM,
1826 .features[FEAT_6_EAX] =
1827 CPUID_6_EAX_ARAT,
1828 .xlevel = 0x80000008,
1829 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1830 },
1831 {
1832 .name = "Westmere-IBRS",
1833 .level = 11,
1834 .vendor = CPUID_VENDOR_INTEL,
1835 .family = 6,
1836 .model = 44,
1837 .stepping = 1,
1838 .features[FEAT_1_EDX] =
1839 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1840 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1841 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1842 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1843 CPUID_DE | CPUID_FP87,
1844 .features[FEAT_1_ECX] =
1845 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1846 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1847 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1848 .features[FEAT_8000_0001_EDX] =
1849 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1850 .features[FEAT_8000_0001_ECX] =
1851 CPUID_EXT3_LAHF_LM,
1852 .features[FEAT_7_0_EDX] =
1853 CPUID_7_0_EDX_SPEC_CTRL,
1854 .features[FEAT_6_EAX] =
1855 CPUID_6_EAX_ARAT,
1856 .xlevel = 0x80000008,
1857 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1858 },
1859     {
         /* Sandy Bridge (family 6, model 42): introduces AVX, AES-NI,
          * PCLMULQDQ, XSAVE/XSAVEOPT and the TSC deadline timer. */
1860         .name = "SandyBridge",
1861         .level = 0xd,
1862         .vendor = CPUID_VENDOR_INTEL,
1863         .family = 6,
1864         .model = 42,
1865         .stepping = 1,
1866         .features[FEAT_1_EDX] =
1867             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1868             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1869             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1870             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1871             CPUID_DE | CPUID_FP87,
1872         .features[FEAT_1_ECX] =
1873             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1874             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1875             CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1876             CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1877             CPUID_EXT_SSE3,
1878         .features[FEAT_8000_0001_EDX] =
1879             CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1880             CPUID_EXT2_SYSCALL,
1881         .features[FEAT_8000_0001_ECX] =
1882             CPUID_EXT3_LAHF_LM,
1883         .features[FEAT_XSAVE] =
1884             CPUID_XSAVE_XSAVEOPT,
1885         .features[FEAT_6_EAX] =
1886             CPUID_6_EAX_ARAT,
1887         .xlevel = 0x80000008,
1888         .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1889     },
1890     {
         /* SandyBridge plus SPEC_CTRL (IBRS/IBPB interface) in CPUID.7:EDX.
          * Kept as a separate model so existing guests keep a stable
          * feature set while new guests can opt in to SPEC_CTRL. */
1891         .name = "SandyBridge-IBRS",
1892         .level = 0xd,
1893         .vendor = CPUID_VENDOR_INTEL,
1894         .family = 6,
1895         .model = 42,
1896         .stepping = 1,
1897         .features[FEAT_1_EDX] =
1898             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1899             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1900             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1901             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1902             CPUID_DE | CPUID_FP87,
1903         .features[FEAT_1_ECX] =
1904             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1905             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1906             CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1907             CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1908             CPUID_EXT_SSE3,
1909         .features[FEAT_8000_0001_EDX] =
1910             CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1911             CPUID_EXT2_SYSCALL,
1912         .features[FEAT_8000_0001_ECX] =
1913             CPUID_EXT3_LAHF_LM,
         /* Only difference from the SandyBridge entry above: */
1914         .features[FEAT_7_0_EDX] =
1915             CPUID_7_0_EDX_SPEC_CTRL,
1916         .features[FEAT_XSAVE] =
1917             CPUID_XSAVE_XSAVEOPT,
1918         .features[FEAT_6_EAX] =
1919             CPUID_6_EAX_ARAT,
1920         .xlevel = 0x80000008,
1921         .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1922     },
1923     {
         /* Ivy Bridge (family 6, model 58): SandyBridge feature set plus
          * F16C, RDRAND, FSGSBASE, SMEP and ERMS. */
1924         .name = "IvyBridge",
1925         .level = 0xd,
1926         .vendor = CPUID_VENDOR_INTEL,
1927         .family = 6,
1928         .model = 58,
1929         .stepping = 9,
1930         .features[FEAT_1_EDX] =
1931             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1932             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1933             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1934             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1935             CPUID_DE | CPUID_FP87,
1936         .features[FEAT_1_ECX] =
1937             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1938             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1939             CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1940             CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1941             CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1942         .features[FEAT_7_0_EBX] =
1943             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1944             CPUID_7_0_EBX_ERMS,
1945         .features[FEAT_8000_0001_EDX] =
1946             CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1947             CPUID_EXT2_SYSCALL,
1948         .features[FEAT_8000_0001_ECX] =
1949             CPUID_EXT3_LAHF_LM,
1950         .features[FEAT_XSAVE] =
1951             CPUID_XSAVE_XSAVEOPT,
1952         .features[FEAT_6_EAX] =
1953             CPUID_6_EAX_ARAT,
1954         .xlevel = 0x80000008,
1955         .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1956     },
1957     {
         /* IvyBridge plus SPEC_CTRL (IBRS) in CPUID.7:EDX; otherwise
          * identical to the IvyBridge entry above. */
1958         .name = "IvyBridge-IBRS",
1959         .level = 0xd,
1960         .vendor = CPUID_VENDOR_INTEL,
1961         .family = 6,
1962         .model = 58,
1963         .stepping = 9,
1964         .features[FEAT_1_EDX] =
1965             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1966             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1967             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1968             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1969             CPUID_DE | CPUID_FP87,
1970         .features[FEAT_1_ECX] =
1971             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1972             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1973             CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1974             CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1975             CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1976         .features[FEAT_7_0_EBX] =
1977             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1978             CPUID_7_0_EBX_ERMS,
1979         .features[FEAT_8000_0001_EDX] =
1980             CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1981             CPUID_EXT2_SYSCALL,
1982         .features[FEAT_8000_0001_ECX] =
1983             CPUID_EXT3_LAHF_LM,
1984         .features[FEAT_7_0_EDX] =
1985             CPUID_7_0_EDX_SPEC_CTRL,
1986         .features[FEAT_XSAVE] =
1987             CPUID_XSAVE_XSAVEOPT,
1988         .features[FEAT_6_EAX] =
1989             CPUID_6_EAX_ARAT,
1990         .xlevel = 0x80000008,
1991         .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1992     },
1993     {
         /* Haswell (family 6, model 60) without the TSX bits (HLE/RTM),
          * matching parts/microcode where TSX is absent or disabled.
          * Adds AVX2, BMI1/2, FMA, MOVBE, PCID and INVPCID. */
1994         .name = "Haswell-noTSX",
1995         .level = 0xd,
1996         .vendor = CPUID_VENDOR_INTEL,
1997         .family = 6,
1998         .model = 60,
1999         .stepping = 1,
2000         .features[FEAT_1_EDX] =
2001             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2002             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2003             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2004             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2005             CPUID_DE | CPUID_FP87,
2006         .features[FEAT_1_ECX] =
2007             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2008             CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2009             CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2010             CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2011             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2012             CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2013         .features[FEAT_8000_0001_EDX] =
2014             CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2015             CPUID_EXT2_SYSCALL,
2016         .features[FEAT_8000_0001_ECX] =
2017             CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2018         .features[FEAT_7_0_EBX] =
2019             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2020             CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2021             CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
2022         .features[FEAT_XSAVE] =
2023             CPUID_XSAVE_XSAVEOPT,
2024         .features[FEAT_6_EAX] =
2025             CPUID_6_EAX_ARAT,
2026         .xlevel = 0x80000008,
2027         .model_id = "Intel Core Processor (Haswell, no TSX)",
2028     },
2029     {
         /* Haswell-noTSX plus SPEC_CTRL (IBRS) in CPUID.7:EDX; otherwise
          * identical to the Haswell-noTSX entry above. */
2030         .name = "Haswell-noTSX-IBRS",
2031         .level = 0xd,
2032         .vendor = CPUID_VENDOR_INTEL,
2033         .family = 6,
2034         .model = 60,
2035         .stepping = 1,
2036         .features[FEAT_1_EDX] =
2037             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2038             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2039             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2040             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2041             CPUID_DE | CPUID_FP87,
2042         .features[FEAT_1_ECX] =
2043             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2044             CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2045             CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2046             CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2047             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2048             CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2049         .features[FEAT_8000_0001_EDX] =
2050             CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2051             CPUID_EXT2_SYSCALL,
2052         .features[FEAT_8000_0001_ECX] =
2053             CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2054         .features[FEAT_7_0_EDX] =
2055             CPUID_7_0_EDX_SPEC_CTRL,
2056         .features[FEAT_7_0_EBX] =
2057             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2058             CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2059             CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
2060         .features[FEAT_XSAVE] =
2061             CPUID_XSAVE_XSAVEOPT,
2062         .features[FEAT_6_EAX] =
2063             CPUID_6_EAX_ARAT,
2064         .xlevel = 0x80000008,
2065         .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
2066     },
2067     {
         /* Full Haswell (family 6, model 60, stepping 4): the noTSX
          * feature set plus the TSX bits HLE and RTM. */
2068         .name = "Haswell",
2069         .level = 0xd,
2070         .vendor = CPUID_VENDOR_INTEL,
2071         .family = 6,
2072         .model = 60,
2073         .stepping = 4,
2074         .features[FEAT_1_EDX] =
2075             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2076             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2077             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2078             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2079             CPUID_DE | CPUID_FP87,
2080         .features[FEAT_1_ECX] =
2081             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2082             CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2083             CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2084             CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2085             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2086             CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2087         .features[FEAT_8000_0001_EDX] =
2088             CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2089             CPUID_EXT2_SYSCALL,
2090         .features[FEAT_8000_0001_ECX] =
2091             CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2092         .features[FEAT_7_0_EBX] =
2093             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2094             CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2095             CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2096             CPUID_7_0_EBX_RTM,
2097         .features[FEAT_XSAVE] =
2098             CPUID_XSAVE_XSAVEOPT,
2099         .features[FEAT_6_EAX] =
2100             CPUID_6_EAX_ARAT,
2101         .xlevel = 0x80000008,
2102         .model_id = "Intel Core Processor (Haswell)",
2103     },
2104     {
         /* Haswell plus SPEC_CTRL (IBRS) in CPUID.7:EDX; otherwise
          * identical to the Haswell entry above. */
2105         .name = "Haswell-IBRS",
2106         .level = 0xd,
2107         .vendor = CPUID_VENDOR_INTEL,
2108         .family = 6,
2109         .model = 60,
2110         .stepping = 4,
2111         .features[FEAT_1_EDX] =
2112             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2113             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2114             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2115             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2116             CPUID_DE | CPUID_FP87,
2117         .features[FEAT_1_ECX] =
2118             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2119             CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2120             CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2121             CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2122             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2123             CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2124         .features[FEAT_8000_0001_EDX] =
2125             CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2126             CPUID_EXT2_SYSCALL,
2127         .features[FEAT_8000_0001_ECX] =
2128             CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2129         .features[FEAT_7_0_EDX] =
2130             CPUID_7_0_EDX_SPEC_CTRL,
2131         .features[FEAT_7_0_EBX] =
2132             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2133             CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2134             CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2135             CPUID_7_0_EBX_RTM,
2136         .features[FEAT_XSAVE] =
2137             CPUID_XSAVE_XSAVEOPT,
2138         .features[FEAT_6_EAX] =
2139             CPUID_6_EAX_ARAT,
2140         .xlevel = 0x80000008,
2141         .model_id = "Intel Core Processor (Haswell, IBRS)",
2142     },
2143     {
         /* Broadwell (family 6, model 61) without TSX (HLE/RTM).
          * Adds RDSEED, ADX, SMAP and 3DNOWPREFETCH over Haswell-noTSX. */
2144         .name = "Broadwell-noTSX",
2145         .level = 0xd,
2146         .vendor = CPUID_VENDOR_INTEL,
2147         .family = 6,
2148         .model = 61,
2149         .stepping = 2,
2150         .features[FEAT_1_EDX] =
2151             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2152             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2153             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2154             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2155             CPUID_DE | CPUID_FP87,
2156         .features[FEAT_1_ECX] =
2157             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2158             CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2159             CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2160             CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2161             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2162             CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2163         .features[FEAT_8000_0001_EDX] =
2164             CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2165             CPUID_EXT2_SYSCALL,
2166         .features[FEAT_8000_0001_ECX] =
2167             CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2168         .features[FEAT_7_0_EBX] =
2169             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2170             CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2171             CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2172             CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2173             CPUID_7_0_EBX_SMAP,
2174         .features[FEAT_XSAVE] =
2175             CPUID_XSAVE_XSAVEOPT,
2176         .features[FEAT_6_EAX] =
2177             CPUID_6_EAX_ARAT,
2178         .xlevel = 0x80000008,
2179         .model_id = "Intel Core Processor (Broadwell, no TSX)",
2180     },
2181     {
         /* Broadwell-noTSX plus SPEC_CTRL (IBRS) in CPUID.7:EDX; otherwise
          * identical to the Broadwell-noTSX entry above. */
2182         .name = "Broadwell-noTSX-IBRS",
2183         .level = 0xd,
2184         .vendor = CPUID_VENDOR_INTEL,
2185         .family = 6,
2186         .model = 61,
2187         .stepping = 2,
2188         .features[FEAT_1_EDX] =
2189             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2190             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2191             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2192             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2193             CPUID_DE | CPUID_FP87,
2194         .features[FEAT_1_ECX] =
2195             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2196             CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2197             CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2198             CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2199             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2200             CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2201         .features[FEAT_8000_0001_EDX] =
2202             CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2203             CPUID_EXT2_SYSCALL,
2204         .features[FEAT_8000_0001_ECX] =
2205             CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2206         .features[FEAT_7_0_EDX] =
2207             CPUID_7_0_EDX_SPEC_CTRL,
2208         .features[FEAT_7_0_EBX] =
2209             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2210             CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2211             CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2212             CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2213             CPUID_7_0_EBX_SMAP,
2214         .features[FEAT_XSAVE] =
2215             CPUID_XSAVE_XSAVEOPT,
2216         .features[FEAT_6_EAX] =
2217             CPUID_6_EAX_ARAT,
2218         .xlevel = 0x80000008,
2219         .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
2220     },
2221     {
         /* Full Broadwell (family 6, model 61): the noTSX feature set
          * plus the TSX bits HLE and RTM. */
2222         .name = "Broadwell",
2223         .level = 0xd,
2224         .vendor = CPUID_VENDOR_INTEL,
2225         .family = 6,
2226         .model = 61,
2227         .stepping = 2,
2228         .features[FEAT_1_EDX] =
2229             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2230             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2231             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2232             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2233             CPUID_DE | CPUID_FP87,
2234         .features[FEAT_1_ECX] =
2235             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2236             CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2237             CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2238             CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2239             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2240             CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2241         .features[FEAT_8000_0001_EDX] =
2242             CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2243             CPUID_EXT2_SYSCALL,
2244         .features[FEAT_8000_0001_ECX] =
2245             CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2246         .features[FEAT_7_0_EBX] =
2247             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2248             CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2249             CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2250             CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2251             CPUID_7_0_EBX_SMAP,
2252         .features[FEAT_XSAVE] =
2253             CPUID_XSAVE_XSAVEOPT,
2254         .features[FEAT_6_EAX] =
2255             CPUID_6_EAX_ARAT,
2256         .xlevel = 0x80000008,
2257         .model_id = "Intel Core Processor (Broadwell)",
2258     },
2259     {
         /* Broadwell plus SPEC_CTRL (IBRS) in CPUID.7:EDX; otherwise
          * identical to the Broadwell entry above. */
2260         .name = "Broadwell-IBRS",
2261         .level = 0xd,
2262         .vendor = CPUID_VENDOR_INTEL,
2263         .family = 6,
2264         .model = 61,
2265         .stepping = 2,
2266         .features[FEAT_1_EDX] =
2267             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2268             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2269             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2270             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2271             CPUID_DE | CPUID_FP87,
2272         .features[FEAT_1_ECX] =
2273             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2274             CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2275             CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2276             CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2277             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2278             CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2279         .features[FEAT_8000_0001_EDX] =
2280             CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2281             CPUID_EXT2_SYSCALL,
2282         .features[FEAT_8000_0001_ECX] =
2283             CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2284         .features[FEAT_7_0_EDX] =
2285             CPUID_7_0_EDX_SPEC_CTRL,
2286         .features[FEAT_7_0_EBX] =
2287             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2288             CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2289             CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2290             CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2291             CPUID_7_0_EBX_SMAP,
2292         .features[FEAT_XSAVE] =
2293             CPUID_XSAVE_XSAVEOPT,
2294         .features[FEAT_6_EAX] =
2295             CPUID_6_EAX_ARAT,
2296         .xlevel = 0x80000008,
2297         .model_id = "Intel Core Processor (Broadwell, IBRS)",
2298     },
2299     {
         /* Skylake client (family 6, model 94): adds MPX and the
          * XSAVEC/XGETBV1 XSAVE sub-features over Broadwell. */
2300         .name = "Skylake-Client",
2301         .level = 0xd,
2302         .vendor = CPUID_VENDOR_INTEL,
2303         .family = 6,
2304         .model = 94,
2305         .stepping = 3,
2306         .features[FEAT_1_EDX] =
2307             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2308             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2309             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2310             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2311             CPUID_DE | CPUID_FP87,
2312         .features[FEAT_1_ECX] =
2313             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2314             CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2315             CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2316             CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2317             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2318             CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2319         .features[FEAT_8000_0001_EDX] =
2320             CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2321             CPUID_EXT2_SYSCALL,
2322         .features[FEAT_8000_0001_ECX] =
2323             CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2324         .features[FEAT_7_0_EBX] =
2325             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2326             CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2327             CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2328             CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2329             CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2330         /* Missing: XSAVES (not supported by some Linux versions,
2331          * including v4.1 to v4.12).
2332          * KVM doesn't yet expose any XSAVES state save component,
2333          * and the only one defined in Skylake (processor tracing)
2334          * probably will block migration anyway.
2335          */
2336         .features[FEAT_XSAVE] =
2337             CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2338             CPUID_XSAVE_XGETBV1,
2339         .features[FEAT_6_EAX] =
2340             CPUID_6_EAX_ARAT,
2341         .xlevel = 0x80000008,
2342         .model_id = "Intel Core Processor (Skylake)",
2343     },
2344     {
         /* Skylake-Client plus SPEC_CTRL (IBRS) in CPUID.7:EDX; otherwise
          * identical to the Skylake-Client entry above. */
2345         .name = "Skylake-Client-IBRS",
2346         .level = 0xd,
2347         .vendor = CPUID_VENDOR_INTEL,
2348         .family = 6,
2349         .model = 94,
2350         .stepping = 3,
2351         .features[FEAT_1_EDX] =
2352             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2353             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2354             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2355             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2356             CPUID_DE | CPUID_FP87,
2357         .features[FEAT_1_ECX] =
2358             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2359             CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2360             CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2361             CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2362             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2363             CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2364         .features[FEAT_8000_0001_EDX] =
2365             CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2366             CPUID_EXT2_SYSCALL,
2367         .features[FEAT_8000_0001_ECX] =
2368             CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2369         .features[FEAT_7_0_EDX] =
2370             CPUID_7_0_EDX_SPEC_CTRL,
2371         .features[FEAT_7_0_EBX] =
2372             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2373             CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2374             CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2375             CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2376             CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2377         /* Missing: XSAVES (not supported by some Linux versions,
2378          * including v4.1 to v4.12).
2379          * KVM doesn't yet expose any XSAVES state save component,
2380          * and the only one defined in Skylake (processor tracing)
2381          * probably will block migration anyway.
2382          */
2383         .features[FEAT_XSAVE] =
2384             CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2385             CPUID_XSAVE_XGETBV1,
2386         .features[FEAT_6_EAX] =
2387             CPUID_6_EAX_ARAT,
2388         .xlevel = 0x80000008,
2389         .model_id = "Intel Core Processor (Skylake, IBRS)",
2390     },
2391     {
         /* Skylake server (family 6, model 85): client feature set plus
          * 1GB pages (PDPE1GB), CLWB, CLFLUSHOPT, the AVX-512
          * F/DQ/BW/CD/VL groups and PKU. */
2392         .name = "Skylake-Server",
2393         .level = 0xd,
2394         .vendor = CPUID_VENDOR_INTEL,
2395         .family = 6,
2396         .model = 85,
2397         .stepping = 4,
2398         .features[FEAT_1_EDX] =
2399             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2400             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2401             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2402             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2403             CPUID_DE | CPUID_FP87,
2404         .features[FEAT_1_ECX] =
2405             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2406             CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2407             CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2408             CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2409             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2410             CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2411         .features[FEAT_8000_0001_EDX] =
2412             CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2413             CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2414         .features[FEAT_8000_0001_ECX] =
2415             CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2416         .features[FEAT_7_0_EBX] =
2417             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2418             CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2419             CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2420             CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2421             CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2422             CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2423             CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2424             CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2425         .features[FEAT_7_0_ECX] =
2426             CPUID_7_0_ECX_PKU,
2427         /* Missing: XSAVES (not supported by some Linux versions,
2428          * including v4.1 to v4.12).
2429          * KVM doesn't yet expose any XSAVES state save component,
2430          * and the only one defined in Skylake (processor tracing)
2431          * probably will block migration anyway.
2432          */
2433         .features[FEAT_XSAVE] =
2434             CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2435             CPUID_XSAVE_XGETBV1,
2436         .features[FEAT_6_EAX] =
2437             CPUID_6_EAX_ARAT,
2438         .xlevel = 0x80000008,
2439         .model_id = "Intel Xeon Processor (Skylake)",
2440     },
2441     {
         /* Skylake-Server plus SPEC_CTRL (IBRS) in CPUID.7:EDX.
          * NOTE(review): unlike the Skylake-Server entry above, this one
          * does NOT list CPUID_7_0_EBX_CLFLUSHOPT — presumably the feature
          * word was frozen when the IBRS variant was introduced, to keep
          * the guest ABI stable for migration; confirm this omission is
          * intentional before "fixing" it. */
2442         .name = "Skylake-Server-IBRS",
2443         .level = 0xd,
2444         .vendor = CPUID_VENDOR_INTEL,
2445         .family = 6,
2446         .model = 85,
2447         .stepping = 4,
2448         .features[FEAT_1_EDX] =
2449             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2450             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2451             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2452             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2453             CPUID_DE | CPUID_FP87,
2454         .features[FEAT_1_ECX] =
2455             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2456             CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2457             CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2458             CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2459             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2460             CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2461         .features[FEAT_8000_0001_EDX] =
2462             CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2463             CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2464         .features[FEAT_8000_0001_ECX] =
2465             CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2466         .features[FEAT_7_0_EDX] =
2467             CPUID_7_0_EDX_SPEC_CTRL,
2468         .features[FEAT_7_0_EBX] =
2469             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2470             CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2471             CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2472             CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2473             CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2474             CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2475             CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2476             CPUID_7_0_EBX_AVX512VL,
2477         .features[FEAT_7_0_ECX] =
2478             CPUID_7_0_ECX_PKU,
2479         /* Missing: XSAVES (not supported by some Linux versions,
2480          * including v4.1 to v4.12).
2481          * KVM doesn't yet expose any XSAVES state save component,
2482          * and the only one defined in Skylake (processor tracing)
2483          * probably will block migration anyway.
2484          */
2485         .features[FEAT_XSAVE] =
2486             CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2487             CPUID_XSAVE_XGETBV1,
2488         .features[FEAT_6_EAX] =
2489             CPUID_6_EAX_ARAT,
2490         .xlevel = 0x80000008,
2491         .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2492     },
2493     {
         /* Cascade Lake server (family 6, model 85, stepping 5):
          * Skylake-Server plus AVX512VNNI and built-in SPEC_CTRL/SSBD. */
2494         .name = "Cascadelake-Server",
2495         .level = 0xd,
2496         .vendor = CPUID_VENDOR_INTEL,
2497         .family = 6,
2498         .model = 85,
2499         .stepping = 5,
2500         .features[FEAT_1_EDX] =
2501             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2502             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2503             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2504             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2505             CPUID_DE | CPUID_FP87,
2506         .features[FEAT_1_ECX] =
2507             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2508             CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2509             CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2510             CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2511             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2512             CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2513         .features[FEAT_8000_0001_EDX] =
2514             CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2515             CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2516         .features[FEAT_8000_0001_ECX] =
2517             CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2518         .features[FEAT_7_0_EBX] =
2519             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2520             CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2521             CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2522             CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2523             CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2524             CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2525             CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2526             CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT |
2527             CPUID_7_0_EBX_INTEL_PT,
         /* OSPKE (CPUID.7:ECX bit 4) deliberately NOT listed: it mirrors
          * CR4.PKE, which the guest OS sets at runtime, so it is not a
          * static CPU capability and KVM never reports it.  Listing it
          * would make "-cpu Cascadelake-Server,enforce" always fail.
          * Only the PKU capability bit belongs here. */
2528         .features[FEAT_7_0_ECX] =
2529             CPUID_7_0_ECX_PKU |
2530             CPUID_7_0_ECX_AVX512VNNI,
2531         .features[FEAT_7_0_EDX] =
2532             CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2533         /* Missing: XSAVES (not supported by some Linux versions,
2534          * including v4.1 to v4.12).
2535          * KVM doesn't yet expose any XSAVES state save component,
2536          * and the only one defined in Skylake (processor tracing)
2537          * probably will block migration anyway.
2538          */
2539         .features[FEAT_XSAVE] =
2540             CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2541             CPUID_XSAVE_XGETBV1,
2542         .features[FEAT_6_EAX] =
2543             CPUID_6_EAX_ARAT,
2544         .xlevel = 0x80000008,
2545         .model_id = "Intel Xeon Processor (Cascadelake)",
2546     },
2547     {
         /* Ice Lake client (family 6, model 126): adds the VBMI/VBMI2,
          * GFNI, VAES, VPCLMULQDQ, AVX512VNNI/BITALG/VPOPCNTDQ groups,
          * UMIP and WBNOINVD. */
2548         .name = "Icelake-Client",
2549         .level = 0xd,
2550         .vendor = CPUID_VENDOR_INTEL,
2551         .family = 6,
2552         .model = 126,
2553         .stepping = 0,
2554         .features[FEAT_1_EDX] =
2555             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2556             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2557             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2558             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2559             CPUID_DE | CPUID_FP87,
2560         .features[FEAT_1_ECX] =
2561             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2562             CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2563             CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2564             CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2565             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2566             CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2567         .features[FEAT_8000_0001_EDX] =
2568             CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2569             CPUID_EXT2_SYSCALL,
2570         .features[FEAT_8000_0001_ECX] =
2571             CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2572         .features[FEAT_8000_0008_EBX] =
2573             CPUID_8000_0008_EBX_WBNOINVD,
2574         .features[FEAT_7_0_EBX] =
2575             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2576             CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2577             CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2578             CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2579             CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_INTEL_PT,
         /* OSPKE (CPUID.7:ECX bit 4) deliberately NOT listed: it mirrors
          * CR4.PKE, set by the guest OS at runtime, so KVM never reports
          * it and "-cpu Icelake-Client,enforce" would always fail.
          * Only the PKU capability bit belongs here. */
2580         .features[FEAT_7_0_ECX] =
2581             CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2582             CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2583             CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2584             CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2585             CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2586         .features[FEAT_7_0_EDX] =
2587             CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2588         /* Missing: XSAVES (not supported by some Linux versions,
2589          * including v4.1 to v4.12).
2590          * KVM doesn't yet expose any XSAVES state save component,
2591          * and the only one defined in Skylake (processor tracing)
2592          * probably will block migration anyway.
2593          */
2594         .features[FEAT_XSAVE] =
2595             CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2596             CPUID_XSAVE_XGETBV1,
2597         .features[FEAT_6_EAX] =
2598             CPUID_6_EAX_ARAT,
2599         .xlevel = 0x80000008,
2600         .model_id = "Intel Core Processor (Icelake)",
2601     },
2602     {
         /* Ice Lake server (family 6, model 134): Icelake-Client feature
          * set plus 1GB pages (PDPE1GB), CLWB, CLFLUSHOPT, the AVX-512
          * F/DQ/BW/CD/VL groups, LA57 (5-level paging) and PCONFIG. */
2603         .name = "Icelake-Server",
2604         .level = 0xd,
2605         .vendor = CPUID_VENDOR_INTEL,
2606         .family = 6,
2607         .model = 134,
2608         .stepping = 0,
2609         .features[FEAT_1_EDX] =
2610             CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2611             CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2612             CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2613             CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2614             CPUID_DE | CPUID_FP87,
2615         .features[FEAT_1_ECX] =
2616             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2617             CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2618             CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2619             CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2620             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2621             CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2622         .features[FEAT_8000_0001_EDX] =
2623             CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2624             CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2625         .features[FEAT_8000_0001_ECX] =
2626             CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2627         .features[FEAT_8000_0008_EBX] =
2628             CPUID_8000_0008_EBX_WBNOINVD,
2629         .features[FEAT_7_0_EBX] =
2630             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2631             CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2632             CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2633             CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2634             CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2635             CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2636             CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2637             CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT |
2638             CPUID_7_0_EBX_INTEL_PT,
         /* OSPKE (CPUID.7:ECX bit 4) deliberately NOT listed: it mirrors
          * CR4.PKE, set by the guest OS at runtime, so KVM never reports
          * it and "-cpu Icelake-Server,enforce" would always fail.
          * Only the PKU capability bit belongs here. */
2639         .features[FEAT_7_0_ECX] =
2640             CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2641             CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2642             CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2643             CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2644             CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
2645         .features[FEAT_7_0_EDX] =
2646             CPUID_7_0_EDX_PCONFIG | CPUID_7_0_EDX_SPEC_CTRL |
2647             CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2648         /* Missing: XSAVES (not supported by some Linux versions,
2649          * including v4.1 to v4.12).
2650          * KVM doesn't yet expose any XSAVES state save component,
2651          * and the only one defined in Skylake (processor tracing)
2652          * probably will block migration anyway.
2653          */
2654         .features[FEAT_XSAVE] =
2655             CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2656             CPUID_XSAVE_XGETBV1,
2657         .features[FEAT_6_EAX] =
2658             CPUID_6_EAX_ARAT,
2659         .xlevel = 0x80000008,
2660         .model_id = "Intel Xeon Processor (Icelake)",
2661     },
2662     {
         /* Knights Mill (Xeon Phi, family 6, model 133): includes the
          * many-core AVX-512 ER/PF prefetch/exp-recip groups and the
          * 4VNNIW/4FMAPS neural-network instructions; also sets CPUID_SS
          * (self-snoop), unlike the Core models in this table. */
2663         .name = "KnightsMill",
2664         .level = 0xd,
2665         .vendor = CPUID_VENDOR_INTEL,
2666         .family = 6,
2667         .model = 133,
2668         .stepping = 0,
2669         .features[FEAT_1_EDX] =
2670             CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2671             CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2672             CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2673             CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2674             CPUID_PSE | CPUID_DE | CPUID_FP87,
2675         .features[FEAT_1_ECX] =
2676             CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2677             CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2678             CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2679             CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2680             CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2681             CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2682         .features[FEAT_8000_0001_EDX] =
2683             CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2684             CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2685         .features[FEAT_8000_0001_ECX] =
2686             CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2687         .features[FEAT_7_0_EBX] =
2688             CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2689             CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2690             CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2691             CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2692             CPUID_7_0_EBX_AVX512ER,
2693         .features[FEAT_7_0_ECX] =
2694             CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2695         .features[FEAT_7_0_EDX] =
2696             CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2697         .features[FEAT_XSAVE] =
2698             CPUID_XSAVE_XSAVEOPT,
2699         .features[FEAT_6_EAX] =
2700             CPUID_6_EAX_ARAT,
2701         .xlevel = 0x80000008,
2702         .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2703     },
2704 {
2705 .name = "Opteron_G1",
2706 .level = 5,
2707 .vendor = CPUID_VENDOR_AMD,
2708 .family = 15,
2709 .model = 6,
2710 .stepping = 1,
2711 .features[FEAT_1_EDX] =
2712 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2713 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2714 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2715 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2716 CPUID_DE | CPUID_FP87,
2717 .features[FEAT_1_ECX] =
2718 CPUID_EXT_SSE3,
2719 .features[FEAT_8000_0001_EDX] =
2720 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2721 .xlevel = 0x80000008,
2722 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2723 },
2724 {
2725 .name = "Opteron_G2",
2726 .level = 5,
2727 .vendor = CPUID_VENDOR_AMD,
2728 .family = 15,
2729 .model = 6,
2730 .stepping = 1,
2731 .features[FEAT_1_EDX] =
2732 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2733 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2734 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2735 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2736 CPUID_DE | CPUID_FP87,
2737 .features[FEAT_1_ECX] =
2738 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2739 /* Missing: CPUID_EXT2_RDTSCP */
2740 .features[FEAT_8000_0001_EDX] =
2741 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2742 .features[FEAT_8000_0001_ECX] =
2743 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2744 .xlevel = 0x80000008,
2745 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2746 },
2747 {
2748 .name = "Opteron_G3",
2749 .level = 5,
2750 .vendor = CPUID_VENDOR_AMD,
2751 .family = 16,
2752 .model = 2,
2753 .stepping = 3,
2754 .features[FEAT_1_EDX] =
2755 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2756 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2757 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2758 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2759 CPUID_DE | CPUID_FP87,
2760 .features[FEAT_1_ECX] =
2761 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2762 CPUID_EXT_SSE3,
2763 /* Missing: CPUID_EXT2_RDTSCP */
2764 .features[FEAT_8000_0001_EDX] =
2765 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2766 .features[FEAT_8000_0001_ECX] =
2767 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2768 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2769 .xlevel = 0x80000008,
2770 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2771 },
2772 {
2773 .name = "Opteron_G4",
2774 .level = 0xd,
2775 .vendor = CPUID_VENDOR_AMD,
2776 .family = 21,
2777 .model = 1,
2778 .stepping = 2,
2779 .features[FEAT_1_EDX] =
2780 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2781 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2782 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2783 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2784 CPUID_DE | CPUID_FP87,
2785 .features[FEAT_1_ECX] =
2786 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2787 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2788 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2789 CPUID_EXT_SSE3,
2790 /* Missing: CPUID_EXT2_RDTSCP */
2791 .features[FEAT_8000_0001_EDX] =
2792 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2793 CPUID_EXT2_SYSCALL,
2794 .features[FEAT_8000_0001_ECX] =
2795 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2796 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2797 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2798 CPUID_EXT3_LAHF_LM,
2799 /* no xsaveopt! */
2800 .xlevel = 0x8000001A,
2801 .model_id = "AMD Opteron 62xx class CPU",
2802 },
2803 {
2804 .name = "Opteron_G5",
2805 .level = 0xd,
2806 .vendor = CPUID_VENDOR_AMD,
2807 .family = 21,
2808 .model = 2,
2809 .stepping = 0,
2810 .features[FEAT_1_EDX] =
2811 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2812 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2813 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2814 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2815 CPUID_DE | CPUID_FP87,
2816 .features[FEAT_1_ECX] =
2817 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2818 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2819 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2820 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2821 /* Missing: CPUID_EXT2_RDTSCP */
2822 .features[FEAT_8000_0001_EDX] =
2823 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2824 CPUID_EXT2_SYSCALL,
2825 .features[FEAT_8000_0001_ECX] =
2826 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2827 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2828 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2829 CPUID_EXT3_LAHF_LM,
2830 /* no xsaveopt! */
2831 .xlevel = 0x8000001A,
2832 .model_id = "AMD Opteron 63xx class CPU",
2833 },
2834 {
2835 .name = "EPYC",
2836 .level = 0xd,
2837 .vendor = CPUID_VENDOR_AMD,
2838 .family = 23,
2839 .model = 1,
2840 .stepping = 2,
2841 .features[FEAT_1_EDX] =
2842 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2843 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2844 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2845 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2846 CPUID_VME | CPUID_FP87,
2847 .features[FEAT_1_ECX] =
2848 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2849 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2850 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2851 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2852 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2853 .features[FEAT_8000_0001_EDX] =
2854 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2855 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2856 CPUID_EXT2_SYSCALL,
2857 .features[FEAT_8000_0001_ECX] =
2858 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2859 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2860 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2861 CPUID_EXT3_TOPOEXT,
2862 .features[FEAT_7_0_EBX] =
2863 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2864 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2865 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2866 CPUID_7_0_EBX_SHA_NI,
2867 /* Missing: XSAVES (not supported by some Linux versions,
2868 * including v4.1 to v4.12).
2869 * KVM doesn't yet expose any XSAVES state save component.
2870 */
2871 .features[FEAT_XSAVE] =
2872 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2873 CPUID_XSAVE_XGETBV1,
2874 .features[FEAT_6_EAX] =
2875 CPUID_6_EAX_ARAT,
2876 .xlevel = 0x8000001E,
2877 .model_id = "AMD EPYC Processor",
2878 .cache_info = &epyc_cache_info,
2879 },
2880 {
2881 .name = "EPYC-IBPB",
2882 .level = 0xd,
2883 .vendor = CPUID_VENDOR_AMD,
2884 .family = 23,
2885 .model = 1,
2886 .stepping = 2,
2887 .features[FEAT_1_EDX] =
2888 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2889 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2890 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2891 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2892 CPUID_VME | CPUID_FP87,
2893 .features[FEAT_1_ECX] =
2894 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2895 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2896 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2897 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2898 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2899 .features[FEAT_8000_0001_EDX] =
2900 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2901 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2902 CPUID_EXT2_SYSCALL,
2903 .features[FEAT_8000_0001_ECX] =
2904 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2905 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2906 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2907 CPUID_EXT3_TOPOEXT,
2908 .features[FEAT_8000_0008_EBX] =
2909 CPUID_8000_0008_EBX_IBPB,
2910 .features[FEAT_7_0_EBX] =
2911 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2912 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2913 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2914 CPUID_7_0_EBX_SHA_NI,
2915 /* Missing: XSAVES (not supported by some Linux versions,
2916 * including v4.1 to v4.12).
2917 * KVM doesn't yet expose any XSAVES state save component.
2918 */
2919 .features[FEAT_XSAVE] =
2920 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2921 CPUID_XSAVE_XGETBV1,
2922 .features[FEAT_6_EAX] =
2923 CPUID_6_EAX_ARAT,
2924 .xlevel = 0x8000001E,
2925 .model_id = "AMD EPYC Processor (with IBPB)",
2926 .cache_info = &epyc_cache_info,
2927 },
2928 };
2929
/* A single property-name/value pair, used for accelerator default tables. */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
2933
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * The table is NULL-terminated; x86_cpu_change_kvm_default() walks it
 * until the { NULL, NULL } sentinel and may rewrite individual values.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },  /* sentinel -- must stay last */
};
2950
/* TCG-specific defaults that override all CPU models when using TCG
 * (NULL-terminated, same layout as kvm_default_props).
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },  /* presumably unsupported under TCG -- confirm */
    { NULL, NULL },    /* sentinel -- must stay last */
};
2957
2958
2959 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2960 {
2961 PropValue *pv;
2962 for (pv = kvm_default_props; pv->prop; pv++) {
2963 if (!strcmp(pv->prop, prop)) {
2964 pv->value = value;
2965 break;
2966 }
2967 }
2968
2969 /* It is valid to call this function only for properties that
2970 * are already present in the kvm_default_props table.
2971 */
2972 assert(pv->prop);
2973 }
2974
2975 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2976 bool migratable_only);
2977
/*
 * lmce_supported:
 * Report whether the accelerator supports Local Machine Check Exceptions.
 *
 * Returns true only when KVM reports the MCG_LMCE_P bit via the
 * KVM_X86_GET_MCE_CAP_SUPPORTED ioctl.  Without CONFIG_KVM, mce_cap
 * stays 0 and the function returns false.
 */
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}
2990
2991 #define CPUID_MODEL_ID_SZ 48
2992
2993 /**
2994 * cpu_x86_fill_model_id:
2995 * Get CPUID model ID string from host CPU.
2996 *
2997 * @str should have at least CPUID_MODEL_ID_SZ bytes
2998 *
2999 * The function does NOT add a null terminator to the string
3000 * automatically.
3001 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    /* The 48-byte brand string is returned by CPUID leaves
     * 0x80000002..0x80000004, 16 bytes per leaf, in EAX/EBX/ECX/EDX order.
     * Copying the whole register array preserves the per-register byte
     * layout of the original four memcpy() calls.
     */
    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
3016
/* QOM properties specific to the "max" CPU model */
static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
3022
3023 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
3024 {
3025 DeviceClass *dc = DEVICE_CLASS(oc);
3026 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3027
3028 xcc->ordering = 9;
3029
3030 xcc->model_description =
3031 "Enables all features supported by the accelerator in the current host";
3032
3033 dc->props = max_x86_cpu_properties;
3034 }
3035
3036 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
3037
/*
 * max_x86_cpu_initfn:
 * Instance initializer for the "max" CPU model.
 *
 * With a host-CPUID accelerator (KVM/HVF) the vendor, family/model/stepping
 * and model-id are copied from the host CPU; under TCG a generic QEMU
 * identity is used instead.  Finally, the "pmu" property is enabled.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        /* KVM or HVF: mirror the host CPU's identification strings */
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        host_vendor_fms(vendor, &family, &model, &stepping);

        cpu_x86_fill_model_id(model_id);

        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        if (kvm_enabled()) {
            /* Minimum CPUID levels the guest will expose, taken from
             * what the accelerator supports for each leaf range.
             */
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            /* HVF path: accel_uses_host_cpuid() is true but KVM is not */
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG: generic QEMU identity instead of the host's */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
3103
/* "max" CPU model: enables everything the current accelerator supports */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
3110
3111 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
/* Class initializer for the "host" CPU model (KVM/HVF builds only). */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* "host" cannot run without an accelerator that exposes host CPUID */
    xcc->host_cpuid_required = true;
    /* Sorts just before "max" (ordering 9) in model listings */
    xcc->ordering = 8;

#if defined(CONFIG_KVM)
    xcc->model_description =
        "KVM processor with all supported host features ";
#elif defined(CONFIG_HVF)
    xcc->model_description =
        "HVF processor with all supported host features ";
#endif
}
3127
/* "host" CPU model: a "max" variant that requires host CPUID passthrough */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
3133
3134 #endif
3135
3136 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
3137 {
3138 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
3139
3140 switch (f->type) {
3141 case CPUID_FEATURE_WORD:
3142 {
3143 const char *reg = get_register_name_32(f->cpuid.reg);
3144 assert(reg);
3145 return g_strdup_printf("CPUID.%02XH:%s",
3146 f->cpuid.eax, reg);
3147 }
3148 case MSR_FEATURE_WORD:
3149 return g_strdup_printf("MSR(%02XH)",
3150 f->msr.index);
3151 }
3152
3153 return NULL;
3154 }
3155
3156 static void report_unavailable_features(FeatureWord w, uint32_t mask)
3157 {
3158 FeatureWordInfo *f = &feature_word_info[w];
3159 int i;
3160 char *feat_word_str;
3161
3162 for (i = 0; i < 32; ++i) {
3163 if ((1UL << i) & mask) {
3164 feat_word_str = feature_word_description(f, i);
3165 warn_report("%s doesn't support requested feature: %s%s%s [bit %d]",
3166 accel_uses_host_cpuid() ? "host" : "TCG",
3167 feat_word_str,
3168 f->feat_names[i] ? "." : "",
3169 f->feat_names[i] ? f->feat_names[i] : "", i);
3170 g_free(feat_word_str);
3171 }
3172 }
3173 }
3174
3175 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
3176 const char *name, void *opaque,
3177 Error **errp)
3178 {
3179 X86CPU *cpu = X86_CPU(obj);
3180 CPUX86State *env = &cpu->env;
3181 int64_t value;
3182
3183 value = (env->cpuid_version >> 8) & 0xf;
3184 if (value == 0xf) {
3185 value += (env->cpuid_version >> 20) & 0xff;
3186 }
3187 visit_type_int(v, name, &value, errp);
3188 }
3189
3190 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
3191 const char *name, void *opaque,
3192 Error **errp)
3193 {
3194 X86CPU *cpu = X86_CPU(obj);
3195 CPUX86State *env = &cpu->env;
3196 const int64_t min = 0;
3197 const int64_t max = 0xff + 0xf;
3198 Error *local_err = NULL;
3199 int64_t value;
3200
3201 visit_type_int(v, name, &value, &local_err);
3202 if (local_err) {
3203 error_propagate(errp, local_err);
3204 return;
3205 }
3206 if (value < min || value > max) {
3207 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3208 name ? name : "null", value, min, max);
3209 return;
3210 }
3211
3212 env->cpuid_version &= ~0xff00f00;
3213 if (value > 0x0f) {
3214 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
3215 } else {
3216 env->cpuid_version |= value << 8;
3217 }
3218 }
3219
3220 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
3221 const char *name, void *opaque,
3222 Error **errp)
3223 {
3224 X86CPU *cpu = X86_CPU(obj);
3225 CPUX86State *env = &cpu->env;
3226 int64_t value;
3227
3228 value = (env->cpuid_version >> 4) & 0xf;
3229 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
3230 visit_type_int(v, name, &value, errp);
3231 }
3232
3233 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
3234 const char *name, void *opaque,
3235 Error **errp)
3236 {
3237 X86CPU *cpu = X86_CPU(obj);
3238 CPUX86State *env = &cpu->env;
3239 const int64_t min = 0;
3240 const int64_t max = 0xff;
3241 Error *local_err = NULL;
3242 int64_t value;
3243
3244 visit_type_int(v, name, &value, &local_err);
3245 if (local_err) {
3246 error_propagate(errp, local_err);
3247 return;
3248 }
3249 if (value < min || value > max) {
3250 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3251 name ? name : "null", value, min, max);
3252 return;
3253 }
3254
3255 env->cpuid_version &= ~0xf00f0;
3256 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
3257 }
3258
3259 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
3260 const char *name, void *opaque,
3261 Error **errp)
3262 {
3263 X86CPU *cpu = X86_CPU(obj);
3264 CPUX86State *env = &cpu->env;
3265 int64_t value;
3266
3267 value = env->cpuid_version & 0xf;
3268 visit_type_int(v, name, &value, errp);
3269 }
3270
3271 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
3272 const char *name, void *opaque,
3273 Error **errp)
3274 {
3275 X86CPU *cpu = X86_CPU(obj);
3276 CPUX86State *env = &cpu->env;
3277 const int64_t min = 0;
3278 const int64_t max = 0xf;
3279 Error *local_err = NULL;
3280 int64_t value;
3281
3282 visit_type_int(v, name, &value, &local_err);
3283 if (local_err) {
3284 error_propagate(errp, local_err);
3285 return;
3286 }
3287 if (value < min || value > max) {
3288 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3289 name ? name : "null", value, min, max);
3290 return;
3291 }
3292
3293 env->cpuid_version &= ~0xf;
3294 env->cpuid_version |= value & 0xf;
3295 }
3296
3297 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
3298 {
3299 X86CPU *cpu = X86_CPU(obj);
3300 CPUX86State *env = &cpu->env;
3301 char *value;
3302
3303 value = g_malloc(CPUID_VENDOR_SZ + 1);
3304 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
3305 env->cpuid_vendor3);
3306 return value;
3307 }
3308
3309 static void x86_cpuid_set_vendor(Object *obj, const char *value,
3310 Error **errp)
3311 {
3312 X86CPU *cpu = X86_CPU(obj);
3313 CPUX86State *env = &cpu->env;
3314 int i;
3315
3316 if (strlen(value) != CPUID_VENDOR_SZ) {
3317 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
3318 return;
3319 }
3320
3321 env->cpuid_vendor1 = 0;
3322 env->cpuid_vendor2 = 0;
3323 env->cpuid_vendor3 = 0;
3324 for (i = 0; i < 4; i++) {
3325 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3326 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3327 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3328 }
3329 }
3330
3331 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3332 {
3333 X86CPU *cpu = X86_CPU(obj);
3334 CPUX86State *env = &cpu->env;
3335 char *value;
3336 int i;
3337
3338 value = g_malloc(48 + 1);
3339 for (i = 0; i < 48; i++) {
3340 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3341 }
3342 value[48] = '\0';
3343 return value;
3344 }
3345
3346 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3347 Error **errp)
3348 {
3349 X86CPU *cpu = X86_CPU(obj);
3350 CPUX86State *env = &cpu->env;
3351 int c, len, i;
3352
3353 if (model_id == NULL) {
3354 model_id = "";
3355 }
3356 len = strlen(model_id);
3357 memset(env->cpuid_model, 0, 48);
3358 for (i = 0; i < 48; i++) {
3359 if (i >= len) {
3360 c = '\0';
3361 } else {
3362 c = (uint8_t)model_id[i];
3363 }
3364 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3365 }
3366 }
3367
3368 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3369 void *opaque, Error **errp)
3370 {
3371 X86CPU *cpu = X86_CPU(obj);
3372 int64_t value;
3373
3374 value = cpu->env.tsc_khz * 1000;
3375 visit_type_int(v, name, &value, errp);
3376 }
3377
3378 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3379 void *opaque, Error **errp)
3380 {
3381 X86CPU *cpu = X86_CPU(obj);
3382 const int64_t min = 0;
3383 const int64_t max = INT64_MAX;
3384 Error *local_err = NULL;
3385 int64_t value;
3386
3387 visit_type_int(v, name, &value, &local_err);
3388 if (local_err) {
3389 error_propagate(errp, local_err);
3390 return;
3391 }
3392 if (value < min || value > max) {
3393 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3394 name ? name : "null", value, min, max);
3395 return;
3396 }
3397
3398 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
3399 }
3400
3401 /* Generic getter for "feature-words" and "filtered-features" properties */
/*
 * x86_cpu_get_feature_words:
 * QOM getter shared by "feature-words" and "filtered-features".
 *
 * @opaque points at the per-word uint32_t array to report (the CPU's
 * enabled-features or filtered-features array).  Builds an
 * X86CPUFeatureWordInfoList on the stack and visits it; only
 * CPUID-backed feature words are included.
 */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /*
         * We didn't have MSR features when "feature-words" was
         * introduced. Therefore skipped other type entries.
         */
        if (wi->type != CPUID_FEATURE_WORD) {
            continue;
        }
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid.eax;
        qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid.ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
3436
3437 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3438 void *opaque, Error **errp)
3439 {
3440 X86CPU *cpu = X86_CPU(obj);
3441 int64_t value = cpu->hyperv_spinlock_attempts;
3442
3443 visit_type_int(v, name, &value, errp);
3444 }
3445
3446 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3447 void *opaque, Error **errp)
3448 {
3449 const int64_t min = 0xFFF;
3450 const int64_t max = UINT_MAX;
3451 X86CPU *cpu = X86_CPU(obj);
3452 Error *err = NULL;
3453 int64_t value;
3454
3455 visit_type_int(v, name, &value, &err);
3456 if (err) {
3457 error_propagate(errp, err);
3458 return;
3459 }
3460
3461 if (value < min || value > max) {
3462 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
3463 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
3464 object_get_typename(obj), name ? name : "null",
3465 value, min, max);
3466 return;
3467 }
3468 cpu->hyperv_spinlock_attempts = value;
3469 }
3470
/* PropertyInfo backing the "hv-spinlocks" integer device property */
static const PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
3476
3477 /* Convert all '_' in a feature string option name to '-', to make feature
3478 * name conform to QOM property naming rule, which uses '-' instead of '_'.
3479 */
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    for (char *p = s; *p != '\0'; p++) {
        if (*p == '_') {
            *p = '-';
        }
    }
}
3486
3487 /* Return the feature property name for a feature flag bit */
/*
 * x86_cpu_feature_name:
 * Return the property name for bit @bitnr of feature word @w, or NULL
 * when the bit has no named feature.
 */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        /* Component index: the HI word covers components 32..63 */
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            /* Redirect to the word/bit of the feature controlling
             * this save-state component.
             */
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}
3507
3508 /* Compatibily hack to maintain legacy +-feat semantic,
3509 * where +-feat overwrites any feature set by
3510 * feat=on|feat even if the later is parsed after +-feat
3511 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
3512 */
3513 static GList *plus_features, *minus_features;
3514
/* GCompareFunc for g_list_find_custom(): NULL-safe string comparison */
static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}
3519
3520 /* Parse "+feature,-feature,feature=foo" CPU feature string
3521 */
/*
 * x86_cpu_parse_featurestr:
 * Parse a "-cpu" feature string and register each item as a global
 * property on @typename.  Runs at most once per process (guarded by
 * cpu_globals_initialized); later calls are silently ignored.
 *
 * Legacy "+feat"/"-feat" items are collected into plus_features /
 * minus_features instead of becoming globals, and mixing them with
 * "feat=val" for the same feature triggers a warning.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    /* NOTE(review): strtok() mutates @features and keeps static state;
     * presumably safe because this runs once during startup -- confirm
     * against callers.
     */
    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "key=value"; a bare "key" means "key=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Warn when the same feature appears both as +/-feat and feat=val */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            /* Accepts metric suffixes (e.g. "2G"); rejects overflow */
            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}
3609
3610 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3611 static int x86_cpu_filter_features(X86CPU *cpu);
3612
3613 /* Check for missing features that may prevent the CPU class from
3614 * running using the current machine and accelerator.
3615 */
/*
 * x86_cpu_class_check_missing_features:
 * Fill @missing_feats with the names of everything preventing CPU class
 * @xcc from running on the current machine/accelerator.
 *
 * Appends "kvm" when the model requires host CPUID without an
 * accelerator, "type" when feature expansion itself fails, and one
 * entry per feature bit removed by x86_cpu_filter_features().
 */
static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
                                                 strList **missing_feats)
{
    X86CPU *xc;
    FeatureWord w;
    Error *err = NULL;
    strList **next = missing_feats;

    if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("kvm");
        *missing_feats = new;
        return;
    }

    /* Instantiate a throwaway CPU object to compute its filtered features */
    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));

    x86_cpu_expand_features(xc, &err);
    if (err) {
        /* Errors at x86_cpu_expand_features should never happen,
         * but in case it does, just report the model as not
         * runnable at all using the "type" property.
         */
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("type");
        *next = new;
        next = &new->next;
    }

    x86_cpu_filter_features(xc);

    /* Every filtered-out bit becomes one entry in the missing list */
    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t filtered = xc->filtered_features[w];
        int i;
        for (i = 0; i < 32; i++) {
            if (filtered & (1UL << i)) {
                strList *new = g_new0(strList, 1);
                new->value = g_strdup(x86_cpu_feature_name(w, i));
                *next = new;
                next = &new->next;
            }
        }
    }

    object_unref(OBJECT(xc));
}
3662
3663 /* Print all cpuid feature names in featureset
3664 */
3665 static void listflags(FILE *f, fprintf_function print, GList *features)
3666 {
3667 size_t len = 0;
3668 GList *tmp;
3669
3670 for (tmp = features; tmp; tmp = tmp->next) {
3671 const char *name = tmp->data;
3672 if ((len + strlen(name) + 1) >= 75) {
3673 print(f, "\n");
3674 len = 0;
3675 }
3676 print(f, "%s%s", len == 0 ? " " : " ", name);
3677 len += strlen(name) + 1;
3678 }
3679 print(f, "\n");
3680 }
3681
3682 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3683 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3684 {
3685 ObjectClass *class_a = (ObjectClass *)a;
3686 ObjectClass *class_b = (ObjectClass *)b;
3687 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3688 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3689 char *name_a, *name_b;
3690 int ret;
3691
3692 if (cc_a->ordering != cc_b->ordering) {
3693 ret = cc_a->ordering - cc_b->ordering;
3694 } else {
3695 name_a = x86_cpu_class_get_model_name(cc_a);
3696 name_b = x86_cpu_class_get_model_name(cc_b);
3697 ret = strcmp(name_a, name_b);
3698 g_free(name_a);
3699 g_free(name_b);
3700 }
3701 return ret;
3702 }
3703
3704 static GSList *get_sorted_cpu_model_list(void)
3705 {
3706 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3707 list = g_slist_sort(list, x86_cpu_list_compare);
3708 return list;
3709 }
3710
3711 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3712 {
3713 ObjectClass *oc = data;
3714 X86CPUClass *cc = X86_CPU_CLASS(oc);
3715 CPUListState *s = user_data;
3716 char *name = x86_cpu_class_get_model_name(cc);
3717 const char *desc = cc->model_description;
3718 if (!desc && cc->cpu_def) {
3719 desc = cc->cpu_def->model_id;
3720 }
3721
3722 (*s->cpu_fprintf)(s->file, "x86 %-20s %-48s\n",
3723 name, desc);
3724 g_free(name);
3725 }
3726
3727 /* list available CPU models and flags */
3728 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3729 {
3730 int i, j;
3731 CPUListState s = {
3732 .file = f,
3733 .cpu_fprintf = cpu_fprintf,
3734 };
3735 GSList *list;
3736 GList *names = NULL;
3737
3738 (*cpu_fprintf)(f, "Available CPUs:\n");
3739 list = get_sorted_cpu_model_list();
3740 g_slist_foreach(list, x86_cpu_list_entry, &s);
3741 g_slist_free(list);
3742
3743 names = NULL;
3744 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3745 FeatureWordInfo *fw = &feature_word_info[i];
3746 for (j = 0; j < 32; j++) {
3747 if (fw->feat_names[j]) {
3748 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3749 }
3750 }
3751 }
3752
3753 names = g_list_sort(names, (GCompareFunc)strcmp);
3754
3755 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
3756 listflags(f, cpu_fprintf, names);
3757 (*cpu_fprintf)(f, "\n");
3758 g_list_free(names);
3759 }
3760
3761 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3762 {
3763 ObjectClass *oc = data;
3764 X86CPUClass *cc = X86_CPU_CLASS(oc);
3765 CpuDefinitionInfoList **cpu_list = user_data;
3766 CpuDefinitionInfoList *entry;
3767 CpuDefinitionInfo *info;
3768
3769 info = g_malloc0(sizeof(*info));
3770 info->name = x86_cpu_class_get_model_name(cc);
3771 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3772 info->has_unavailable_features = true;
3773 info->q_typename = g_strdup(object_class_get_name(oc));
3774 info->migration_safe = cc->migration_safe;
3775 info->has_migration_safe = true;
3776 info->q_static = cc->static_model;
3777
3778 entry = g_malloc0(sizeof(*entry));
3779 entry->value = info;
3780 entry->next = *cpu_list;
3781 *cpu_list = entry;
3782 }
3783
3784 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
3785 {
3786 CpuDefinitionInfoList *cpu_list = NULL;
3787 GSList *list = get_sorted_cpu_model_list();
3788 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3789 g_slist_free(list);
3790 return cpu_list;
3791 }
3792
3793 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3794 bool migratable_only)
3795 {
3796 FeatureWordInfo *wi = &feature_word_info[w];
3797 uint32_t r = 0;
3798
3799 if (kvm_enabled()) {
3800 switch (wi->type) {
3801 case CPUID_FEATURE_WORD:
3802 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
3803 wi->cpuid.ecx,
3804 wi->cpuid.reg);
3805 break;
3806 case MSR_FEATURE_WORD:
3807 r = kvm_arch_get_supported_msr_feature(kvm_state,
3808 wi->msr.index);
3809 break;
3810 }
3811 } else if (hvf_enabled()) {
3812 if (wi->type != CPUID_FEATURE_WORD) {
3813 return 0;
3814 }
3815 r = hvf_get_supported_cpuid(wi->cpuid.eax,
3816 wi->cpuid.ecx,
3817 wi->cpuid.reg);
3818 } else if (tcg_enabled()) {
3819 r = wi->tcg_features;
3820 } else {
3821 return ~0;
3822 }
3823 if (migratable_only) {
3824 r &= x86_cpu_get_migratable_flags(w);
3825 }
3826 return r;
3827 }
3828
3829 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3830 {
3831 FeatureWord w;
3832
3833 for (w = 0; w < FEATURE_WORDS; w++) {
3834 report_unavailable_features(w, cpu->filtered_features[w]);
3835 }
3836 }
3837
3838 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3839 {
3840 PropValue *pv;
3841 for (pv = props; pv->prop; pv++) {
3842 if (!pv->value) {
3843 continue;
3844 }
3845 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3846 &error_abort);
3847 }
3848 }
3849
/* Load data from X86CPUDefinition into a X86CPU object.
 *
 * Copies the table-defined level/xlevel minimums, version fields,
 * feature words and vendor into @cpu, then applies accelerator-specific
 * default property overrides. Errors from individual property sets are
 * accumulated into @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    /* Raw copy of all feature words from the model definition. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        /* Without an in-kernel irqchip, x2apic cannot be emulated. */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Guests always see the hypervisor bit. */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        /* Use the host's vendor string (leaf 0 EBX/EDX/ECX). */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
3911
3912 /* Return a QDict containing keys for all properties that can be included
3913 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3914 * must be included in the dictionary.
3915 */
3916 static QDict *x86_cpu_static_props(void)
3917 {
3918 FeatureWord w;
3919 int i;
3920 static const char *props[] = {
3921 "min-level",
3922 "min-xlevel",
3923 "family",
3924 "model",
3925 "stepping",
3926 "model-id",
3927 "vendor",
3928 "lmce",
3929 NULL,
3930 };
3931 static QDict *d;
3932
3933 if (d) {
3934 return d;
3935 }
3936
3937 d = qdict_new();
3938 for (i = 0; props[i]; i++) {
3939 qdict_put_null(d, props[i]);
3940 }
3941
3942 for (w = 0; w < FEATURE_WORDS; w++) {
3943 FeatureWordInfo *fi = &feature_word_info[w];
3944 int bit;
3945 for (bit = 0; bit < 32; bit++) {
3946 if (!fi->feat_names[bit]) {
3947 continue;
3948 }
3949 qdict_put_null(d, fi->feat_names[bit]);
3950 }
3951 }
3952
3953 return d;
3954 }
3955
3956 /* Add an entry to @props dict, with the value for property. */
3957 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3958 {
3959 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3960 &error_abort);
3961
3962 qdict_put_obj(props, prop, value);
3963 }
3964
3965 /* Convert CPU model data from X86CPU object to a property dictionary
3966 * that can recreate exactly the same CPU model.
3967 */
3968 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3969 {
3970 QDict *sprops = x86_cpu_static_props();
3971 const QDictEntry *e;
3972
3973 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3974 const char *prop = qdict_entry_key(e);
3975 x86_cpu_expand_prop(cpu, props, prop);
3976 }
3977 }
3978
3979 /* Convert CPU model data from X86CPU object to a property dictionary
3980 * that can recreate exactly the same CPU model, including every
3981 * writeable QOM property.
3982 */
3983 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3984 {
3985 ObjectPropertyIterator iter;
3986 ObjectProperty *prop;
3987
3988 object_property_iter_init(&iter, OBJECT(cpu));
3989 while ((prop = object_property_iter_next(&iter))) {
3990 /* skip read-only or write-only properties */
3991 if (!prop->get || !prop->set) {
3992 continue;
3993 }
3994
3995 /* "hotplugged" is the only property that is configurable
3996 * on the command-line but will be set differently on CPUs
3997 * created using "-cpu ... -smp ..." and by CPUs created
3998 * on the fly by x86_cpu_from_model() for querying. Skip it.
3999 */
4000 if (!strcmp(prop->name, "hotplugged")) {
4001 continue;
4002 }
4003 x86_cpu_expand_prop(cpu, props, prop->name);
4004 }
4005 }
4006
4007 static void object_apply_props(Object *obj, QDict *props, Error **errp)
4008 {
4009 const QDictEntry *prop;
4010 Error *err = NULL;
4011
4012 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
4013 object_property_set_qobject(obj, qdict_entry_value(prop),
4014 qdict_entry_key(prop), &err);
4015 if (err) {
4016 break;
4017 }
4018 }
4019
4020 error_propagate(errp, err);
4021 }
4022
/* Create X86CPU object according to model+props specification.
 *
 * Returns a new X86CPU with features expanded, or NULL on error
 * (unknown model, bad property, or feature-expansion failure), in
 * which case @errp is set. The caller owns the returned reference.
 */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    if (err) {
        error_propagate(errp, err);
        /* NOTE(review): xc may still be NULL here (model-not-found path);
         * this relies on object_unref() tolerating NULL — confirm.
         */
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}
4057
/* QMP query-cpu-model-expansion: expand a CPU model (+props) either
 * statically (relative to the "base" model) or fully (every writable
 * QOM property). Returns NULL and sets @errp on failure.
 */
CpuModelExpansionInfo *
arch_query_cpu_model_expansion(CpuModelExpansionType type,
                               CpuModelInfo *model,
                               Error **errp)
{
    X86CPU *xc = NULL;
    Error *err = NULL;
    CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
    QDict *props = NULL;
    const char *base_name;

    /* Build a throwaway CPU object from the requested model+props. */
    xc = x86_cpu_from_model(model->name,
                            model->has_props ?
                            qobject_to(QDict, model->props) :
                            NULL, &err);
    if (err) {
        goto out;
    }

    props = qdict_new();
    ret->model = g_new0(CpuModelInfo, 1);
    ret->model->props = QOBJECT(props);
    ret->model->has_props = true;

    switch (type) {
    case CPU_MODEL_EXPANSION_TYPE_STATIC:
        /* Static expansion will be based on "base" only */
        base_name = "base";
        x86_cpu_to_dict(xc, props);
        break;
    case CPU_MODEL_EXPANSION_TYPE_FULL:
        /* As we don't return every single property, full expansion needs
         * to keep the original model name+props, and add extra
         * properties on top of that.
         */
        base_name = model->name;
        x86_cpu_to_dict_full(xc, props);
        break;
    default:
        error_setg(&err, "Unsupported expansion type");
        goto out;
    }

    /* NOTE(review): this call looks redundant — the static branch already
     * called x86_cpu_to_dict() and the full branch is a superset; qdict
     * puts replace existing keys, so it appears to be a harmless repeat.
     * Confirm before removing.
     */
    x86_cpu_to_dict(xc, props);

    ret->model->name = g_strdup(base_name);

out:
    object_unref(OBJECT(xc));
    if (err) {
        error_propagate(errp, err);
        qapi_free_CpuModelExpansionInfo(ret);
        ret = NULL;
    }
    return ret;
}
4114
4115 static gchar *x86_gdb_arch_name(CPUState *cs)
4116 {
4117 #ifdef TARGET_X86_64
4118 return g_strdup("i386:x86-64");
4119 #else
4120 return g_strdup("i386");
4121 #endif
4122 }
4123
4124 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
4125 {
4126 X86CPUDefinition *cpudef = data;
4127 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4128
4129 xcc->cpu_def = cpudef;
4130 xcc->migration_safe = true;
4131 }
4132
4133 static void x86_register_cpudef_type(X86CPUDefinition *def)
4134 {
4135 char *typename = x86_cpu_type_name(def->name);
4136 TypeInfo ti = {
4137 .name = typename,
4138 .parent = TYPE_X86_CPU,
4139 .class_init = x86_cpu_cpudef_class_init,
4140 .class_data = def,
4141 };
4142
4143 /* AMD aliases are handled at runtime based on CPUID vendor, so
4144 * they shouldn't be set on the CPU model table.
4145 */
4146 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
4147 /* catch mistakes instead of silently truncating model_id when too long */
4148 assert(def->model_id && strlen(def->model_id) <= 48);
4149
4150
4151 type_register(&ti);
4152 g_free(typename);
4153 }
4154
4155 #if !defined(CONFIG_USER_ONLY)
4156
4157 void cpu_clear_apic_feature(CPUX86State *env)
4158 {
4159 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
4160 }
4161
4162 #endif /* !CONFIG_USER_ONLY */
4163
/* Emulate the CPUID instruction for @env.
 *
 * @index: requested leaf (EAX input); clamped per-range to the configured
 *         level/xlevel/xlevel2 limits before dispatch.
 * @count: requested sub-leaf (ECX input), used by multi-subleaf leaves.
 * @eax/@ebx/@ecx/@edx: output registers (always written).
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;         /* Centaur/VIA range */
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;          /* extended range */
    } else if (index >= 0x40000000) {
        limit = 0x40000001;                 /* hypervisor range */
    } else {
        limit = env->cpuid_level;           /* basic range */
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Leaf 0: max basic leaf and vendor string. */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Leaf 1: version, APIC ID, feature flags. */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE reflects the guest's CR4 setting, not a static flag. */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << pkg_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* MONITOR/MWAIT Leaf */
        *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
        *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
        *ecx = cpu->mwait.ecx; /* flags */
        *edx = cpu->mwait.edx; /* mwait substates */
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors the guest's CR4.PKE, like OSXSAVE above. */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        /* The APIC-ID shift width must fit the architected 5-bit field. */
        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = xsave_area_size(env->xcr0);
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component sub-leaves: size/offset of each save area. */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Max extended leaf and vendor string (again). */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf. */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        /* Advanced Power Management leaf. */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        /* AMD cache topology (analogous to Intel leaf 4). */
        *eax = 0;
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        /* AMD processor topology (core/node IDs). */
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(cs, cpu,
                                  eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        /* AMD SEV: enablement flag, C-bit position, phys-bit reduction. */
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
4603
/* CPUClass::reset(): put the vCPU into the architectural power-on/RESET
 * state (real mode, CS=F000:FFF0, FPU/SSE/MTRR defaults), then apply the
 * accelerator-specific vcpu reset.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero only the part of CPUX86State that is reset-sensitive;
     * fields past end_reset_fields (e.g. CPUID configuration) survive.
     */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segments; CS base 0xffff0000 so CS:IP points at the
     * reset vector below 4GB.
     */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_injected = -1;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Application processors start halted, waiting for INIT/SIPI. */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}
4732
4733 #ifndef CONFIG_USER_ONLY
4734 bool cpu_is_bsp(X86CPU *cpu)
4735 {
4736 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4737 }
4738
4739 /* TODO: remove me, when reset over QOM tree is implemented */
4740 static void x86_cpu_machine_reset_cb(void *opaque)
4741 {
4742 X86CPU *cpu = opaque;
4743 cpu_reset(CPU(cpu));
4744 }
4745 #endif
4746
4747 static void mce_init(X86CPU *cpu)
4748 {
4749 CPUX86State *cenv = &cpu->env;
4750 unsigned int bank;
4751
4752 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4753 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4754 (CPUID_MCE | CPUID_MCA)) {
4755 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4756 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4757 cenv->mcg_ctl = ~(uint64_t)0;
4758 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4759 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4760 }
4761 }
4762 }
4763
4764 #ifndef CONFIG_USER_ONLY
4765 APICCommonClass *apic_get_class(void)
4766 {
4767 const char *apic_type = "apic";
4768
4769 /* TODO: in-kernel irqchip for hvf */
4770 if (kvm_apic_in_kernel()) {
4771 apic_type = "kvm-apic";
4772 } else if (xen_enabled()) {
4773 apic_type = "xen-apic";
4774 }
4775
4776 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4777 }
4778
/* Instantiate the local APIC device for @cpu and attach it as the
 * "lapic" QOM child. The child link holds the only long-lived
 * reference; our own object_new() reference is dropped below.
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* add_child took a reference; release the one from object_new(). */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
4796
4797 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4798 {
4799 APICCommonState *apic;
4800 static bool apic_mmio_map_once;
4801
4802 if (cpu->apic_state == NULL) {
4803 return;
4804 }
4805 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
4806 errp);
4807
4808 /* Map APIC MMIO area */
4809 apic = APIC_COMMON(cpu->apic_state);
4810 if (!apic_mmio_map_once) {
4811 memory_region_add_subregion_overlap(get_system_memory(),
4812 apic->apicbase &
4813 MSR_IA32_APICBASE_BASE,
4814 &apic->io_memory,
4815 0x1000);
4816 apic_mmio_map_once = true;
4817 }
4818 }
4819
4820 static void x86_cpu_machine_done(Notifier *n, void *unused)
4821 {
4822 X86CPU *cpu = container_of(n, X86CPU, machine_done);
4823 MemoryRegion *smram =
4824 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4825
4826 if (smram) {
4827 cpu->smram = g_new(MemoryRegion, 1);
4828 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4829 smram, 0, 1ull << 32);
4830 memory_region_set_enabled(cpu->smram, true);
4831 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
4832 }
4833 }
4834 #else
/* User-mode emulation has no APIC device: this stub intentionally does
 * nothing so callers need no #ifdef at the call site.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
4838 #endif
4839
4840 /* Note: Only safe for use on x86(-64) hosts */
4841 static uint32_t x86_host_phys_bits(void)
4842 {
4843 uint32_t eax;
4844 uint32_t host_phys_bits;
4845
4846 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4847 if (eax >= 0x80000008) {
4848 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4849 /* Note: According to AMD doc 25481 rev 2.34 they have a field
4850 * at 23:16 that can specify a maximum physical address bits for
4851 * the guest that can override this value; but I've not seen
4852 * anything with that set.
4853 */
4854 host_phys_bits = eax & 0xff;
4855 } else {
4856 /* It's an odd 64 bit machine that doesn't have the leaf for
4857 * physical address bits; fall back to 36 that's most older
4858 * Intel.
4859 */
4860 host_phys_bits = 36;
4861 }
4862
4863 return host_phys_bits;
4864 }
4865
4866 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4867 {
4868 if (*min < value) {
4869 *min = value;
4870 }
4871 }
4872
4873 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4874 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4875 {
4876 CPUX86State *env = &cpu->env;
4877 FeatureWordInfo *fi = &feature_word_info[w];
4878 uint32_t eax = fi->cpuid.eax;
4879 uint32_t region = eax & 0xF0000000;
4880
4881 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
4882 if (!env->features[w]) {
4883 return;
4884 }
4885
4886 switch (region) {
4887 case 0x00000000:
4888 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4889 break;
4890 case 0x80000000:
4891 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4892 break;
4893 case 0xC0000000:
4894 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4895 break;
4896 }
4897 }
4898
4899 /* Calculate XSAVE components based on the configured CPU feature flags */
4900 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4901 {
4902 CPUX86State *env = &cpu->env;
4903 int i;
4904 uint64_t mask;
4905
4906 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4907 return;
4908 }
4909
4910 mask = 0;
4911 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4912 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4913 if (env->features[esa->feature] & esa->bits) {
4914 mask |= (1ULL << i);
4915 }
4916 }
4917
4918 env->features[FEAT_XSAVE_COMP_LO] = mask;
4919 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4920 }
4921
4922 /***** Steps involved on loading and filtering CPUID data
4923 *
4924 * When initializing and realizing a CPU object, the steps
4925 * involved in setting up CPUID data are:
4926 *
4927 * 1) Loading CPU model definition (X86CPUDefinition). This is
4928 * implemented by x86_cpu_load_def() and should be completely
4929 * transparent, as it is done automatically by instance_init.
4930 * No code should need to look at X86CPUDefinition structs
4931 * outside instance_init.
4932 *
4933 * 2) CPU expansion. This is done by realize before CPUID
4934 * filtering, and will make sure host/accelerator data is
4935 * loaded for CPU models that depend on host capabilities
4936 * (e.g. "host"). Done by x86_cpu_expand_features().
4937 *
4938 * 3) CPUID filtering. This initializes extra data related to
4939 * CPUID, and checks if the host supports all capabilities
4940 * required by the CPU. Runnability of a CPU model is
4941 * determined at this step. Done by x86_cpu_filter_features().
4942 *
4943 * Some operations don't require all steps to be performed.
4944 * More precisely:
4945 *
4946 * - CPU instance creation (instance_init) will run only CPU
4947 * model loading. CPU expansion can't run at instance_init-time
4948 * because host/accelerator data may be not available yet.
4949 * - CPU realization will perform both CPU model expansion and CPUID
4950 * filtering, and return an error in case one of them fails.
4951 * - query-cpu-definitions needs to run all 3 steps. It needs
4952 * to run CPUID filtering, as the 'unavailable-features'
4953 * field is set based on the filtering results.
4954 * - The query-cpu-model-expansion QMP command only needs to run
4955 * CPU model loading and CPU expansion. It should not filter
4956 * any CPUID data based on host capabilities.
4957 */
4958
4959 /* Expand CPU configuration data, based on configured features
4960 * and host/accelerator capabilities when appropriate.
4961 */
4962 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4963 {
4964 CPUX86State *env = &cpu->env;
4965 FeatureWord w;
4966 GList *l;
4967 Error *local_err = NULL;
4968
4969 /*TODO: Now cpu->max_features doesn't overwrite features
4970 * set using QOM properties, and we can convert
4971 * plus_features & minus_features to global properties
4972 * inside x86_cpu_parse_featurestr() too.
4973 */
4974 if (cpu->max_features) {
4975 for (w = 0; w < FEATURE_WORDS; w++) {
4976 /* Override only features that weren't set explicitly
4977 * by the user.
4978 */
4979 env->features[w] |=
4980 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4981 ~env->user_features[w] & \
4982 ~feature_word_info[w].no_autoenable_flags;
4983 }
4984 }
4985
4986 for (l = plus_features; l; l = l->next) {
4987 const char *prop = l->data;
4988 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4989 if (local_err) {
4990 goto out;
4991 }
4992 }
4993
4994 for (l = minus_features; l; l = l->next) {
4995 const char *prop = l->data;
4996 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4997 if (local_err) {
4998 goto out;
4999 }
5000 }
5001
5002 if (!kvm_enabled() || !cpu->expose_kvm) {
5003 env->features[FEAT_KVM] = 0;
5004 }
5005
5006 x86_cpu_enable_xsave_components(cpu);
5007
5008 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
5009 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
5010 if (cpu->full_cpuid_auto_level) {
5011 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
5012 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
5013 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
5014 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
5015 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
5016 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
5017 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
5018 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
5019 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
5020 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
5021 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
5022 /* SVM requires CPUID[0x8000000A] */
5023 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
5024 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
5025 }
5026
5027 /* SEV requires CPUID[0x8000001F] */
5028 if (sev_enabled()) {
5029 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
5030 }
5031 }
5032
5033 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
5034 if (env->cpuid_level == UINT32_MAX) {
5035 env->cpuid_level = env->cpuid_min_level;
5036 }
5037 if (env->cpuid_xlevel == UINT32_MAX) {
5038 env->cpuid_xlevel = env->cpuid_min_xlevel;
5039 }
5040 if (env->cpuid_xlevel2 == UINT32_MAX) {
5041 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
5042 }
5043
5044 out:
5045 if (local_err != NULL) {
5046 error_propagate(errp, local_err);
5047 }
5048 }
5049
5050 /*
5051 * Finishes initialization of CPUID data, filters CPU feature
5052 * words based on host availability of each feature.
5053 *
5054 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
5055 */
5056 static int x86_cpu_filter_features(X86CPU *cpu)
5057 {
5058 CPUX86State *env = &cpu->env;
5059 FeatureWord w;
5060 int rv = 0;
5061
5062 for (w = 0; w < FEATURE_WORDS; w++) {
5063 uint32_t host_feat =
5064 x86_cpu_get_supported_feature_word(w, false);
5065 uint32_t requested_features = env->features[w];
5066 env->features[w] &= host_feat;
5067 cpu->filtered_features[w] = requested_features & ~env->features[w];
5068 if (cpu->filtered_features[w]) {
5069 rv = 1;
5070 }
5071 }
5072
5073 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
5074 kvm_enabled()) {
5075 KVMState *s = CPU(cpu)->kvm_state;
5076 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
5077 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
5078 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
5079 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
5080 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
5081
5082 if (!eax_0 ||
5083 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
5084 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
5085 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
5086 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
5087 INTEL_PT_ADDR_RANGES_NUM) ||
5088 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
5089 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
5090 (ecx_0 & INTEL_PT_IP_LIP)) {
5091 /*
5092 * Processor Trace capabilities aren't configurable, so if the
5093 * host can't emulate the capabilities we report on
5094 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
5095 */
5096 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
5097 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
5098 rv = 1;
5099 }
5100 }
5101
5102 return rv;
5103 }
5104
/* Vendor tests: all three CPUID vendor-string registers must match. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/*
 * Realize an X86CPU device: expand and filter CPUID feature data,
 * apply vendor-specific fixups, validate phys-bits, select cache
 * information, and (for system emulation) create the APIC and the
 * SMM address space, then start the vCPU.  Errors go to @errp.
 *
 * NOTE(review): error handling mixes direct error_setg(errp, ...)
 * with local_err + goto out -- both paths are terminal, but keep the
 * distinction in mind when editing.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    /* "host"/"max"-style models need a host-CPUID-capable accelerator. */
    if (xcc->host_cpuid_required) {
        if (!accel_uses_host_cpuid()) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(&local_err, "CPU model '%s' requires KVM", name);
            g_free(name);
            goto out;
        }

        if (enable_cpu_pm) {
            host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
                       &cpu->mwait.ecx, &cpu->mwait.edx);
            env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
        }
    }

    /* mwait extended info: needed for Core compatibility */
    /* We always wake on interrupt even if host does not have the capability */
    cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Warn (check) or fail (enforce) when the accelerator dropped bits. */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       accel_uses_host_cpuid() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            /* TCG only supports its single fixed physical-bits value. */
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        /* legacy-cache=off needs real cache data from the CPU model. */
        if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            g_free(name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->cpu_def->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }


    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* Create an APIC when the guest needs one (APIC feature bit or SMP). */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
            warn_report("This family of AMD CPU doesn't support "
                        "hyperthreading(%d)",
                        cs->nr_threads);
            error_printf("Please configure -smp options properly"
                         " or try enabling topoext feature.\n");
            ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
5347
5348 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
5349 {
5350 X86CPU *cpu = X86_CPU(dev);
5351 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5352 Error *local_err = NULL;
5353
5354 #ifndef CONFIG_USER_ONLY
5355 cpu_remove_sync(CPU(dev));
5356 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
5357 #endif
5358
5359 if (cpu->apic_state) {
5360 object_unparent(OBJECT(cpu->apic_state));
5361 cpu->apic_state = NULL;
5362 }
5363
5364 xcc->parent_unrealize(dev, &local_err);
5365 if (local_err != NULL) {
5366 error_propagate(errp, local_err);
5367 return;
5368 }
5369 }
5370
/* Opaque state for a feature-bit QOM property: which feature word it
 * lives in and which bit(s) within that word it covers.
 */
typedef struct BitProperty {
    FeatureWord w;   /* index into env->features[] */
    uint32_t mask;   /* bit mask within that word */
} BitProperty;
5375
5376 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5377 void *opaque, Error **errp)
5378 {
5379 X86CPU *cpu = X86_CPU(obj);
5380 BitProperty *fp = opaque;
5381 uint32_t f = cpu->env.features[fp->w];
5382 bool value = (f & fp->mask) == fp->mask;
5383 visit_type_bool(v, name, &value, errp);
5384 }
5385
5386 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5387 void *opaque, Error **errp)
5388 {
5389 DeviceState *dev = DEVICE(obj);
5390 X86CPU *cpu = X86_CPU(obj);
5391 BitProperty *fp = opaque;
5392 Error *local_err = NULL;
5393 bool value;
5394
5395 if (dev->realized) {
5396 qdev_prop_set_after_realize(dev, name, errp);
5397 return;
5398 }
5399
5400 visit_type_bool(v, name, &value, &local_err);
5401 if (local_err) {
5402 error_propagate(errp, local_err);
5403 return;
5404 }
5405
5406 if (value) {
5407 cpu->env.features[fp->w] |= fp->mask;
5408 } else {
5409 cpu->env.features[fp->w] &= ~fp->mask;
5410 }
5411 cpu->env.user_features[fp->w] |= fp->mask;
5412 }
5413
5414 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5415 void *opaque)
5416 {
5417 BitProperty *prop = opaque;
5418 g_free(prop);
5419 }
5420
5421 /* Register a boolean property to get/set a single bit in a uint32_t field.
5422 *
5423 * The same property name can be registered multiple times to make it affect
5424 * multiple bits in the same FeatureWord. In that case, the getter will return
5425 * true only if all bits are set.
5426 */
5427 static void x86_cpu_register_bit_prop(X86CPU *cpu,
5428 const char *prop_name,
5429 FeatureWord w,
5430 int bitnr)
5431 {
5432 BitProperty *fp;
5433 ObjectProperty *op;
5434 uint32_t mask = (1UL << bitnr);
5435
5436 op = object_property_find(OBJECT(cpu), prop_name, NULL);
5437 if (op) {
5438 fp = op->opaque;
5439 assert(fp->w == w);
5440 fp->mask |= mask;
5441 } else {
5442 fp = g_new0(BitProperty, 1);
5443 fp->w = w;
5444 fp->mask = mask;
5445 object_property_add(OBJECT(cpu), prop_name, "bool",
5446 x86_cpu_get_bit_prop,
5447 x86_cpu_set_bit_prop,
5448 x86_cpu_release_bit_prop, fp, &error_abort);
5449 }
5450 }
5451
5452 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5453 FeatureWord w,
5454 int bitnr)
5455 {
5456 FeatureWordInfo *fi = &feature_word_info[w];
5457 const char *name = fi->feat_names[bitnr];
5458
5459 if (!name) {
5460 return;
5461 }
5462
5463 /* Property names should use "-" instead of "_".
5464 * Old names containing underscores are registered as aliases
5465 * using object_property_add_alias()
5466 */
5467 assert(!strchr(name, '_'));
5468 /* aliases don't use "|" delimiters anymore, they are registered
5469 * manually using object_property_add_alias() */
5470 assert(!strchr(name, '|'));
5471 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5472 }
5473
5474 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5475 {
5476 X86CPU *cpu = X86_CPU(cs);
5477 CPUX86State *env = &cpu->env;
5478 GuestPanicInformation *panic_info = NULL;
5479
5480 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5481 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5482
5483 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
5484
5485 assert(HV_CRASH_PARAMS >= 5);
5486 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5487 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5488 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5489 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5490 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5491 }
5492
5493 return panic_info;
5494 }
5495 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5496 const char *name, void *opaque,
5497 Error **errp)
5498 {
5499 CPUState *cs = CPU(obj);
5500 GuestPanicInformation *panic_info;
5501
5502 if (!cs->crash_occurred) {
5503 error_setg(errp, "No crash occured");
5504 return;
5505 }
5506
5507 panic_info = x86_cpu_get_crash_info(cs);
5508 if (panic_info == NULL) {
5509 error_setg(errp, "No crash information");
5510 return;
5511 }
5512
5513 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5514 errp);
5515 qapi_free_GuestPanicInformation(panic_info);
5516 }
5517
/*
 * Instance init for X86CPU: registers the versioned/vendor QOM
 * properties, per-feature-bit boolean properties and their legacy
 * aliases, then loads the class's CPU model definition.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    /* Basic CPUID-derived properties with custom getters/setters. */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only views of the feature and filtered-feature word arrays. */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named feature bit in every feature word. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Legacy alternate spellings of feature names. */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Underscore variants kept for backward compatibility. */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
5600
5601 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5602 {
5603 X86CPU *cpu = X86_CPU(cs);
5604
5605 return cpu->apic_id;
5606 }
5607
5608 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5609 {
5610 X86CPU *cpu = X86_CPU(cs);
5611
5612 return cpu->env.cr[0] & CR0_PG_MASK;
5613 }
5614
5615 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5616 {
5617 X86CPU *cpu = X86_CPU(cs);
5618
5619 cpu->env.eip = value;
5620 }
5621
5622 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5623 {
5624 X86CPU *cpu = X86_CPU(cs);
5625
5626 cpu->env.eip = tb->pc - tb->cs_base;
5627 }
5628
/*
 * Return the single highest-priority deliverable interrupt bit from
 * @interrupt_request, or 0 if nothing can be taken right now.  The
 * if/else chain below encodes the priority order: POLL, SIPI, then
 * (gated on GIF) SMI, NMI, MCE, HARD, VIRQ.
 */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    /* Everything below is blocked while the Global Interrupt Flag is clear. */
    if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            /* SMI is blocked while already in System Management Mode. */
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            /* External interrupt: honour V_INTR masking when in a guest,
             * otherwise EFLAGS.IF and the STI/MOV-SS inhibit shadow.
             */
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}
5670
5671 static bool x86_cpu_has_work(CPUState *cs)
5672 {
5673 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
5674 }
5675
5676 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5677 {
5678 X86CPU *cpu = X86_CPU(cs);
5679 CPUX86State *env = &cpu->env;
5680
5681 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5682 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5683 : bfd_mach_i386_i8086);
5684 info->print_insn = print_insn_i386;
5685
5686 info->cap_arch = CS_ARCH_X86;
5687 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5688 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5689 : CS_MODE_16);
5690 info->cap_insn_unit = 1;
5691 info->cap_insn_split = 8;
5692 }
5693
/*
 * Recompute the derived hflags bits (CPL, PE/MP/EM/TS, TF/VM/IOPL,
 * OSFXSR, LMA, CS/SS size, ADDSEG) from the architectural registers,
 * preserving all hflags bits outside HFLAG_COPY_MASK.
 */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    /* CPL comes from the DPL field of SS. */
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    /* CR0.MP/EM/TS shift together into the corresponding hflags bits. */
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        /* 64-bit code segment: force 32-bit CS/SS flags plus CS64. */
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        /* Legacy/compat mode: derive CS/SS size from the descriptor B bit. */
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            hflags |= HF_ADDSEG_MASK;
        } else {
            /* ADDSEG only needed when a data segment base is non-zero. */
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}
5735
/* qdev properties common to every X86CPU instance. */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments ("hv-*"), all disabled by default. */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
    DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
    DEFINE_PROP_BOOL("hv-tlbflush", X86CPU, hyperv_tlbflush, false),
    DEFINE_PROP_BOOL("hv-evmcs", X86CPU, hyperv_evmcs, false),
    DEFINE_PROP_BOOL("hv-ipi", X86CPU, hyperv_ipi, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* CPUID level limits; UINT32_MAX means "not set by the user". */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
                     true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_END_OF_LIST()
};
5812
/*
 * Class init for the abstract TYPE_X86_CPU type: install the x86
 * implementations of the generic CPUClass and DeviceClass hooks that
 * are shared by every concrete CPU model subclass.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain realize/unrealize so the parent implementations still run. */
    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    /* Save the parent reset handler before overriding it. */
    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    /* Interrupt handling hooks only exist for the TCG backend. */
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* Full-system-only hooks: memory mapping, crash notes, migration. */
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 57;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 41;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
#endif
    cc->disas_set_info = x86_disas_set_info;

    /* Allow instantiation by the user (e.g. via device_add). */
    dc->user_creatable = true;
}
5876
/* Abstract base QOM type for all x86 CPU models. */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,          /* only concrete models are instantiable */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
5886
5887
5888 /* "base" CPU model, used by query-cpu-model-expansion */
5889 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
5890 {
5891 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5892
5893 xcc->static_model = true;
5894 xcc->migration_safe = true;
5895 xcc->model_description = "base CPU model type with no features enabled";
5896 xcc->ordering = 8;
5897 }
5898
/* QOM type for the "base" CPU model (see x86_cpu_base_class_init). */
static const TypeInfo x86_base_cpu_type_info = {
        .name = X86_CPU_TYPE_NAME("base"),
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_base_class_init,
};
5904
5905 static void x86_cpu_register_types(void)
5906 {
5907 int i;
5908
5909 type_register_static(&x86_cpu_type_info);
5910 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5911 x86_register_cpudef_type(&builtin_x86_defs[i]);
5912 }
5913 type_register_static(&max_x86_cpu_type_info);
5914 type_register_static(&x86_base_cpu_type_info);
5915 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5916 type_register_static(&host_x86_cpu_type_info);
5917 #endif
5918 }
5919
5920 type_init(x86_cpu_register_types)