/*
 * Mirrored from QEMU source: target/i386/cpu.c
 * (gitweb blob view; commit subject: "i386: Add x-force-features option
 * for testing")
 */
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24 #include "qemu/qemu-print.h"
25
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/hvf.h"
30 #include "sysemu/cpus.h"
31 #include "kvm_i386.h"
32 #include "sev_i386.h"
33
34 #include "qemu/error-report.h"
35 #include "qemu/module.h"
36 #include "qemu/option.h"
37 #include "qemu/config-file.h"
38 #include "qapi/error.h"
39 #include "qapi/qapi-visit-machine.h"
40 #include "qapi/qapi-visit-run-state.h"
41 #include "qapi/qmp/qdict.h"
42 #include "qapi/qmp/qerror.h"
43 #include "qapi/visitor.h"
44 #include "qom/qom-qobject.h"
45 #include "sysemu/arch_init.h"
46 #include "qapi/qapi-commands-machine-target.h"
47
48 #include "standard-headers/asm-x86/kvm_para.h"
49
50 #include "sysemu/sysemu.h"
51 #include "sysemu/tcg.h"
52 #include "hw/qdev-properties.h"
53 #include "hw/i386/topology.h"
54 #ifndef CONFIG_USER_ONLY
55 #include "exec/address-spaces.h"
56 #include "hw/hw.h"
57 #include "hw/xen/xen.h"
58 #include "hw/i386/apic_internal.h"
59 #include "hw/boards.h"
60 #endif
61
62 #include "disas/capstone.h"
63
64 /* Helpers for building CPUID[2] descriptors: */
65
/*
 * One entry of the CPUID[2] descriptor table below: the cache that a
 * given one-byte CPUID leaf 2 descriptor value stands for.
 */
struct CPUID2CacheDescriptorInfo {
    enum CacheType type;   /* data / instruction / unified */
    int level;             /* cache level (1..3) */
    int size;              /* total cache size, in bytes */
    int line_size;         /* cache line size, in bytes */
    int associativity;     /* number of ways */
};
73
74 /*
75 * Known CPUID 2 cache descriptors.
76 * From Intel SDM Volume 2A, CPUID instruction
77 */
78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
80 .associativity = 4, .line_size = 32, },
81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
82 .associativity = 4, .line_size = 32, },
83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
84 .associativity = 4, .line_size = 64, },
85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
86 .associativity = 2, .line_size = 32, },
87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
88 .associativity = 4, .line_size = 32, },
89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
90 .associativity = 4, .line_size = 64, },
91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
92 .associativity = 6, .line_size = 64, },
93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
94 .associativity = 2, .line_size = 64, },
95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
96 .associativity = 8, .line_size = 64, },
97 /* lines per sector is not supported cpuid2_cache_descriptor(),
98 * so descriptors 0x22, 0x23 are not included
99 */
100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
101 .associativity = 16, .line_size = 64, },
102 /* lines per sector is not supported cpuid2_cache_descriptor(),
103 * so descriptors 0x25, 0x20 are not included
104 */
105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
106 .associativity = 8, .line_size = 64, },
107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
108 .associativity = 8, .line_size = 64, },
109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
112 .associativity = 4, .line_size = 32, },
113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
114 .associativity = 4, .line_size = 32, },
115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
116 .associativity = 4, .line_size = 32, },
117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
118 .associativity = 4, .line_size = 32, },
119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
120 .associativity = 4, .line_size = 64, },
121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
122 .associativity = 8, .line_size = 64, },
123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
124 .associativity = 12, .line_size = 64, },
125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
131 .associativity = 12, .line_size = 64, },
132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
133 .associativity = 16, .line_size = 64, },
134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
135 .associativity = 24, .line_size = 64, },
136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
137 .associativity = 8, .line_size = 64, },
138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
141 .associativity = 4, .line_size = 64, },
142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
143 .associativity = 4, .line_size = 64, },
144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
145 .associativity = 4, .line_size = 64, },
146 /* lines per sector is not supported cpuid2_cache_descriptor(),
147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
148 */
149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
150 .associativity = 8, .line_size = 64, },
151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
152 .associativity = 2, .line_size = 64, },
153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 64, },
155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
156 .associativity = 8, .line_size = 32, },
157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
158 .associativity = 8, .line_size = 32, },
159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
160 .associativity = 8, .line_size = 32, },
161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
162 .associativity = 8, .line_size = 32, },
163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 8, .line_size = 64, },
167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 4, .line_size = 64, },
171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 4, .line_size = 64, },
173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
176 .associativity = 8, .line_size = 64, },
177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
178 .associativity = 8, .line_size = 64, },
179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
182 .associativity = 12, .line_size = 64, },
183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
184 .associativity = 12, .line_size = 64, },
185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
188 .associativity = 16, .line_size = 64, },
189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
190 .associativity = 16, .line_size = 64, },
191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
192 .associativity = 24, .line_size = 64, },
193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
194 .associativity = 24, .line_size = 64, },
195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
196 .associativity = 24, .line_size = 64, },
197 };
198
199 /*
200 * "CPUID leaf 2 does not report cache descriptor information,
201 * use CPUID leaf 4 to query cache parameters"
202 */
203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
204
205 /*
206 * Return a CPUID 2 cache descriptor for a given cache.
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
208 */
209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
210 {
211 int i;
212
213 assert(cache->size > 0);
214 assert(cache->level > 0);
215 assert(cache->line_size > 0);
216 assert(cache->associativity > 0);
217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
219 if (d->level == cache->level && d->type == cache->type &&
220 d->size == cache->size && d->line_size == cache->line_size &&
221 d->associativity == cache->associativity) {
222 return i;
223 }
224 }
225
226 return CACHE_DESCRIPTOR_UNAVAILABLE;
227 }
228
/* CPUID Leaf 4 constants: */

/* EAX: */
#define CACHE_TYPE_D    1
#define CACHE_TYPE_I    2
#define CACHE_TYPE_UNIFIED   3

/*
 * Cache level field, EAX bits 7:5.  The argument is parenthesized so
 * that compound expressions (e.g. CACHE_LEVEL(l + 1)) expand correctly.
 */
#define CACHE_LEVEL(l)        ((l) << 5)

#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
                       ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)
250
251
/*
 * Encode cache info for CPUID[4] (deterministic cache parameters).
 *
 * Fields produced (bit positions follow from the shifts below):
 *   EAX: type (3:0 via CACHE_TYPE), level (7:5), self-init flag (8),
 *        APIC IDs sharing this cache - 1 (25:14), cores in package - 1 (31:26)
 *   EBX: line size - 1 (11:0), partitions - 1 (21:12), ways - 1 (31:22)
 *   ECX: number of sets - 1
 *   EDX: invd-sharing / inclusiveness / complex-indexing flags
 */
static void encode_cache_cpuid4(CPUCacheInfo *cache,
                                int num_apic_ids, int num_cores,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
{
    /* The geometry fields must be mutually consistent */
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    assert(num_apic_ids > 0);
    *eax = CACHE_TYPE(cache->type) |
           CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
           ((num_cores - 1) << 26) |
           ((num_apic_ids - 1) << 14);

    /* All EBX fields are encoded as "value - 1", hence the > 0 checks */
    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
284
285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
287 {
288 assert(cache->size % 1024 == 0);
289 assert(cache->lines_per_tag > 0);
290 assert(cache->associativity > 0);
291 assert(cache->line_size > 0);
292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
293 (cache->lines_per_tag << 8) | (cache->line_size);
294 }
295
#define ASSOC_FULL 0xFF

/*
 * AMD associativity encoding used on CPUID Leaf 0x80000006:
 * The argument is fully parenthesized so that compound expressions
 * (e.g. AMD_ENC_ASSOC(x + y)) expand correctly.  Note that @a is
 * evaluated more than once; do not pass expressions with side effects.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a) : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
311
312 /*
313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
314 * @l3 can be NULL.
315 */
316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
317 CPUCacheInfo *l3,
318 uint32_t *ecx, uint32_t *edx)
319 {
320 assert(l2->size % 1024 == 0);
321 assert(l2->associativity > 0);
322 assert(l2->lines_per_tag > 0);
323 assert(l2->line_size > 0);
324 *ecx = ((l2->size / 1024) << 16) |
325 (AMD_ENC_ASSOC(l2->associativity) << 12) |
326 (l2->lines_per_tag << 8) | (l2->line_size);
327
328 if (l3) {
329 assert(l3->size % (512 * 1024) == 0);
330 assert(l3->associativity > 0);
331 assert(l3->lines_per_tag > 0);
332 assert(l3->line_size > 0);
333 *edx = ((l3->size / (512 * 1024)) << 18) |
334 (AMD_ENC_ASSOC(l3->associativity) << 12) |
335 (l3->lines_per_tag << 8) | (l3->line_size);
336 } else {
337 *edx = 0;
338 }
339 }
340
/*
 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
 * Please refer to the AMD64 Architecture Programmer's Manual Volume 3.
 * Define the constants to build the cpu topology. Right now, TOPOEXT
 * feature is enabled only on EPYC. So, these constants are based on
 * EPYC supported configurations. We may need to handle the cases if
 * these values change in future.
 */
/* Maximum core complexes in a node */
#define MAX_CCX 2
/* Maximum cores in a core complex */
#define MAX_CORES_IN_CCX 4
/* Maximum cores in a node */
#define MAX_CORES_IN_NODE 8
/* Maximum nodes in a socket */
#define MAX_NODES_PER_SOCKET 4

/*
 * Figure out the number of nodes required to build this config.
 * Max cores in a node is 8
 */
static int nodes_in_socket(int nr_cores)
{
    /* Round up: a partially-filled node still counts as a node */
    int nodes = (nr_cores + MAX_CORES_IN_NODE - 1) / MAX_CORES_IN_NODE;

    /* Hardware does not support config with 3 nodes, return 4 in that case */
    if (nodes == 3) {
        nodes = 4;
    }
    return nodes;
}

/*
 * Decide the number of cores in a core complex with the given nr_cores using
 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
 * L3 cache is shared across all cores in a core complex. So, this will also
 * tell us how many cores are sharing the L3 cache.
 */
static int cores_in_core_complex(int nr_cores)
{
    int total_ccx;

    /* Everything fits into a single core complex */
    if (nr_cores <= MAX_CORES_IN_CCX) {
        return nr_cores;
    }

    /*
     * Spread the cores evenly across every core complex of every node,
     * rounding up.
     */
    total_ccx = nodes_in_socket(nr_cores) * MAX_CCX;
    return (nr_cores + total_ccx - 1) / total_ccx;
}
396
/*
 * Encode cache info for CPUID[8000001D] (AMD cache topology).
 *
 * Same register layout as CPUID[4] (see encode_cache_cpuid4), except
 * EAX[25:14] holds the number of logical processors sharing the cache - 1:
 * for L3 that is all threads of every core in the core complex, for
 * L1/L2 just the threads of one core.
 */
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    uint32_t l3_cores;
    /* The geometry fields must be mutually consistent */
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
               (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);

    /* L3 is shared among multiple cores */
    if (cache->level == 3) {
        l3_cores = cores_in_core_complex(cs->nr_cores);
        *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
    } else {
        *eax |= ((cs->nr_threads - 1) << 14);
    }

    /* EBX fields are encoded as "value - 1", hence the > 0 checks */
    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
433
/*
 * Data structure to hold the configuration info for a given core index,
 * filled in by build_core_topology() and consumed when encoding
 * CPUID[8000001E].
 */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology
     * This can be 0,1,2,3 with max 4 cores in a core complex
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};
448
449 /*
450 * Build the configuration closely match the EPYC hardware. Using the EPYC
451 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
452 * right now. This could change in future.
453 * nr_cores : Total number of cores in the config
454 * core_id : Core index of the current CPU
455 * topo : Data structure to hold all the config info for this core index
456 */
457 static void build_core_topology(int nr_cores, int core_id,
458 struct core_topology *topo)
459 {
460 int nodes, cores_in_ccx;
461
462 /* First get the number of nodes required */
463 nodes = nodes_in_socket(nr_cores);
464
465 cores_in_ccx = cores_in_core_complex(nr_cores);
466
467 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
468 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
469 topo->core_id = core_id % cores_in_ccx;
470 topo->num_nodes = nodes;
471 }
472
/*
 * Encode topology info for CPUID[8000001E] (AMD processor topology).
 * EAX = APIC id; EBX/ECX encode core and node ids as documented inline.
 */
static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    struct core_topology topo = {0};
    unsigned long nodes;
    int shift;

    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
    *eax = cpu->apic_id;
    /*
     * CPUID_Fn8000001E_EBX
     * 31:16 Reserved
     * 15:8  Threads per core (The number of threads per core is
     *       Threads per core + 1)
     *  7:0  Core id (see bit decoding below)
     *       SMT:
     *           4:3 node id
     *             2 Core complex id
     *           1:0 Core id
     *       Non SMT:
     *           5:4 node id
     *             3 Core complex id
     *           1:0 Core id
     */
    if (cs->nr_threads - 1) {
        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
                (topo.ccx_id << 2) | topo.core_id;
    } else {
        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
    }
    /*
     * CPUID_Fn8000001E_ECX
     * 31:11 Reserved
     * 10:8  Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0  Node id (see bit decoding below)
     *           2  Socket id
     *         1:0  Node id
     */
    if (topo.num_nodes <= 4) {
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
                topo.node_id;
    } else {
        /*
         * Node id fix up. Actual hardware supports up to 4 nodes. But with
         * more than 32 cores, we may end up with more than 4 nodes.
         * Node id is a combination of socket id and node id. Only requirement
         * here is that this number should be unique across the system.
         * Shift the socket id to accommodate more nodes. We don't expect both
         * socket id and node id to be big number at the same time. This is not
         * an ideal config but we need to support it. Max nodes we can have
         * is 32 (255/8) with 8 cores per node and 255 max cores. We only need
         * 5 bits for nodes. Find the left most set bit to represent the total
         * number of nodes. find_last_bit returns last set bit(0 based). Left
         * shift(+1) the socket id to represent all the nodes.
         */
        nodes = topo.num_nodes - 1;
        shift = find_last_bit(&nodes, 8);
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
                topo.node_id;
    }
    *edx = 0;
}
537
/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,          /* 32 KiB / (64 B line * 8 ways) */
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,         /* 64 KiB / (64 B line * 2 ways) */
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,        /* 4 MiB / (64 B line * 16 ways) */
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};


/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,
    .partitions = 1,
};

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,       /* 16 MiB / (64 B line * 16 ways) */
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};
647
/*
 * TLB definitions: fixed values reported on the AMD-style
 * CPUID[0x80000005]/[0x80000006] TLB leaves.
 * NOTE(review): associativity values here use the AMD encoding of those
 * leaves; 0 in the L2 entries marks the corresponding TLB as disabled.
 */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512

/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
/* generated packets which contain IP payloads have LIP values */
#define INTEL_PT_IP_LIP          (1 << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
698
699 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
700 uint32_t vendor2, uint32_t vendor3)
701 {
702 int i;
703 for (i = 0; i < 4; i++) {
704 dst[i] = vendor1 >> (8 * i);
705 dst[i + 4] = vendor2 >> (8 * i);
706 dst[i + 8] = vendor3 >> (8 * i);
707 }
708 dst[CPUID_VENDOR_SZ] = '\0';
709 }
710
/* Feature sets of older CPU models, and of what TCG can emulate: */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* CPUID[1].EDX features TCG can emulate: */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
/* CPUID[1].ECX features TCG can emulate: */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
          CPUID_EXT_RDRAND)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C */

/* 64-bit-only bits of CPUID[0x80000001].EDX, only when built for x86_64 */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

/* CPUID[0x80000001].EDX: starts from the AMD-aliased subset of TCG_FEATURES */
#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES CPUID_SVM_NPT
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
          /* CPUID_7_0_ECX_OSPKE is dynamic */ \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
778
/* How a feature word is enumerated by hardware: via CPUID or via an MSR */
typedef enum FeatureWordType {
   CPUID_FEATURE_WORD,
   MSR_FEATURE_WORD,
} FeatureWordType;

/* Static metadata describing one 32-bit feature word */
typedef struct FeatureWordInfo {
    FeatureWordType type;
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];  /* one property name per bit, NULL if unnamed */
    union {
        /* If type==CPUID_FEATURE_WORD */
        struct {
            uint32_t eax;   /* Input EAX for CPUID */
            bool needs_ecx; /* CPUID instruction uses ECX as input */
            uint32_t ecx;   /* Input ECX value for CPUID */
            int reg;        /* output register (R_* constant) */
        } cpuid;
        /* If type==MSR_FEATURE_WORD */
        struct {
            uint32_t index;
            struct {   /*CPUID that enumerate this MSR*/
                FeatureWord cpuid_class;
                uint32_t    cpuid_flag;
            } cpuid_dep;
        } msr;
    };
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
} FeatureWordInfo;
815
816 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
817 [FEAT_1_EDX] = {
818 .type = CPUID_FEATURE_WORD,
819 .feat_names = {
820 "fpu", "vme", "de", "pse",
821 "tsc", "msr", "pae", "mce",
822 "cx8", "apic", NULL, "sep",
823 "mtrr", "pge", "mca", "cmov",
824 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
825 NULL, "ds" /* Intel dts */, "acpi", "mmx",
826 "fxsr", "sse", "sse2", "ss",
827 "ht" /* Intel htt */, "tm", "ia64", "pbe",
828 },
829 .cpuid = {.eax = 1, .reg = R_EDX, },
830 .tcg_features = TCG_FEATURES,
831 },
832 [FEAT_1_ECX] = {
833 .type = CPUID_FEATURE_WORD,
834 .feat_names = {
835 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
836 "ds-cpl", "vmx", "smx", "est",
837 "tm2", "ssse3", "cid", NULL,
838 "fma", "cx16", "xtpr", "pdcm",
839 NULL, "pcid", "dca", "sse4.1",
840 "sse4.2", "x2apic", "movbe", "popcnt",
841 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
842 "avx", "f16c", "rdrand", "hypervisor",
843 },
844 .cpuid = { .eax = 1, .reg = R_ECX, },
845 .tcg_features = TCG_EXT_FEATURES,
846 },
847 /* Feature names that are already defined on feature_name[] but
848 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
849 * names on feat_names below. They are copied automatically
850 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
851 */
852 [FEAT_8000_0001_EDX] = {
853 .type = CPUID_FEATURE_WORD,
854 .feat_names = {
855 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
856 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
857 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
858 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
859 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
860 "nx", NULL, "mmxext", NULL /* mmx */,
861 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
862 NULL, "lm", "3dnowext", "3dnow",
863 },
864 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
865 .tcg_features = TCG_EXT2_FEATURES,
866 },
867 [FEAT_8000_0001_ECX] = {
868 .type = CPUID_FEATURE_WORD,
869 .feat_names = {
870 "lahf-lm", "cmp-legacy", "svm", "extapic",
871 "cr8legacy", "abm", "sse4a", "misalignsse",
872 "3dnowprefetch", "osvw", "ibs", "xop",
873 "skinit", "wdt", NULL, "lwp",
874 "fma4", "tce", NULL, "nodeid-msr",
875 NULL, "tbm", "topoext", "perfctr-core",
876 "perfctr-nb", NULL, NULL, NULL,
877 NULL, NULL, NULL, NULL,
878 },
879 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
880 .tcg_features = TCG_EXT3_FEATURES,
881 /*
882 * TOPOEXT is always allowed but can't be enabled blindly by
883 * "-cpu host", as it requires consistent cache topology info
884 * to be provided so it doesn't confuse guests.
885 */
886 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
887 },
888 [FEAT_C000_0001_EDX] = {
889 .type = CPUID_FEATURE_WORD,
890 .feat_names = {
891 NULL, NULL, "xstore", "xstore-en",
892 NULL, NULL, "xcrypt", "xcrypt-en",
893 "ace2", "ace2-en", "phe", "phe-en",
894 "pmm", "pmm-en", NULL, NULL,
895 NULL, NULL, NULL, NULL,
896 NULL, NULL, NULL, NULL,
897 NULL, NULL, NULL, NULL,
898 NULL, NULL, NULL, NULL,
899 },
900 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
901 .tcg_features = TCG_EXT4_FEATURES,
902 },
903 [FEAT_KVM] = {
904 .type = CPUID_FEATURE_WORD,
905 .feat_names = {
906 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
907 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
908 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
909 NULL, NULL, NULL, NULL,
910 NULL, NULL, NULL, NULL,
911 NULL, NULL, NULL, NULL,
912 "kvmclock-stable-bit", NULL, NULL, NULL,
913 NULL, NULL, NULL, NULL,
914 },
915 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
916 .tcg_features = TCG_KVM_FEATURES,
917 },
918 [FEAT_KVM_HINTS] = {
919 .type = CPUID_FEATURE_WORD,
920 .feat_names = {
921 "kvm-hint-dedicated", NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 NULL, NULL, NULL, NULL,
924 NULL, NULL, NULL, NULL,
925 NULL, NULL, NULL, NULL,
926 NULL, NULL, NULL, NULL,
927 NULL, NULL, NULL, NULL,
928 NULL, NULL, NULL, NULL,
929 },
930 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
931 .tcg_features = TCG_KVM_FEATURES,
932 /*
933 * KVM hints aren't auto-enabled by -cpu host, they need to be
934 * explicitly enabled in the command-line.
935 */
936 .no_autoenable_flags = ~0U,
937 },
938 /*
939 * .feat_names are commented out for Hyper-V enlightenments because we
940 * don't want to have two different ways for enabling them on QEMU command
941 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
942 * enabling several feature bits simultaneously, exposing these bits
943 * individually may just confuse guests.
944 */
945 [FEAT_HYPERV_EAX] = {
946 .type = CPUID_FEATURE_WORD,
947 .feat_names = {
948 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
949 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
950 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
951 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
952 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
953 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
954 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
955 NULL, NULL,
956 NULL, NULL, NULL, NULL,
957 NULL, NULL, NULL, NULL,
958 NULL, NULL, NULL, NULL,
959 NULL, NULL, NULL, NULL,
960 },
961 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
962 },
963 [FEAT_HYPERV_EBX] = {
964 .type = CPUID_FEATURE_WORD,
965 .feat_names = {
966 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
967 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
968 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
969 NULL /* hv_create_port */, NULL /* hv_connect_port */,
970 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
971 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
972 NULL, NULL,
973 NULL, NULL, NULL, NULL,
974 NULL, NULL, NULL, NULL,
975 NULL, NULL, NULL, NULL,
976 NULL, NULL, NULL, NULL,
977 },
978 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
979 },
980 [FEAT_HYPERV_EDX] = {
981 .type = CPUID_FEATURE_WORD,
982 .feat_names = {
983 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
984 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
985 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
986 NULL, NULL,
987 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
988 NULL, NULL, NULL, NULL,
989 NULL, NULL, NULL, NULL,
990 NULL, NULL, NULL, NULL,
991 NULL, NULL, NULL, NULL,
992 NULL, NULL, NULL, NULL,
993 },
994 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
995 },
996 [FEAT_HV_RECOMM_EAX] = {
997 .type = CPUID_FEATURE_WORD,
998 .feat_names = {
999 NULL /* hv_recommend_pv_as_switch */,
1000 NULL /* hv_recommend_pv_tlbflush_local */,
1001 NULL /* hv_recommend_pv_tlbflush_remote */,
1002 NULL /* hv_recommend_msr_apic_access */,
1003 NULL /* hv_recommend_msr_reset */,
1004 NULL /* hv_recommend_relaxed_timing */,
1005 NULL /* hv_recommend_dma_remapping */,
1006 NULL /* hv_recommend_int_remapping */,
1007 NULL /* hv_recommend_x2apic_msrs */,
1008 NULL /* hv_recommend_autoeoi_deprecation */,
1009 NULL /* hv_recommend_pv_ipi */,
1010 NULL /* hv_recommend_ex_hypercalls */,
1011 NULL /* hv_hypervisor_is_nested */,
1012 NULL /* hv_recommend_int_mbec */,
1013 NULL /* hv_recommend_evmcs */,
1014 NULL,
1015 NULL, NULL, NULL, NULL,
1016 NULL, NULL, NULL, NULL,
1017 NULL, NULL, NULL, NULL,
1018 NULL, NULL, NULL, NULL,
1019 },
1020 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
1021 },
1022 [FEAT_HV_NESTED_EAX] = {
1023 .type = CPUID_FEATURE_WORD,
1024 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
1025 },
1026 [FEAT_SVM] = {
1027 .type = CPUID_FEATURE_WORD,
1028 .feat_names = {
1029 "npt", "lbrv", "svm-lock", "nrip-save",
1030 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1031 NULL, NULL, "pause-filter", NULL,
1032 "pfthreshold", NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL,
1034 NULL, NULL, NULL, NULL,
1035 NULL, NULL, NULL, NULL,
1036 NULL, NULL, NULL, NULL,
1037 },
1038 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
1039 .tcg_features = TCG_SVM_FEATURES,
1040 },
1041 [FEAT_7_0_EBX] = {
1042 .type = CPUID_FEATURE_WORD,
1043 .feat_names = {
1044 "fsgsbase", "tsc-adjust", NULL, "bmi1",
1045 "hle", "avx2", NULL, "smep",
1046 "bmi2", "erms", "invpcid", "rtm",
1047 NULL, NULL, "mpx", NULL,
1048 "avx512f", "avx512dq", "rdseed", "adx",
1049 "smap", "avx512ifma", "pcommit", "clflushopt",
1050 "clwb", "intel-pt", "avx512pf", "avx512er",
1051 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1052 },
1053 .cpuid = {
1054 .eax = 7,
1055 .needs_ecx = true, .ecx = 0,
1056 .reg = R_EBX,
1057 },
1058 .tcg_features = TCG_7_0_EBX_FEATURES,
1059 },
1060 [FEAT_7_0_ECX] = {
1061 .type = CPUID_FEATURE_WORD,
1062 .feat_names = {
1063 NULL, "avx512vbmi", "umip", "pku",
1064 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
1065 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1066 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
1067 "la57", NULL, NULL, NULL,
1068 NULL, NULL, "rdpid", NULL,
1069 NULL, "cldemote", NULL, "movdiri",
1070 "movdir64b", NULL, NULL, NULL,
1071 },
1072 .cpuid = {
1073 .eax = 7,
1074 .needs_ecx = true, .ecx = 0,
1075 .reg = R_ECX,
1076 },
1077 .tcg_features = TCG_7_0_ECX_FEATURES,
1078 },
1079 [FEAT_7_0_EDX] = {
1080 .type = CPUID_FEATURE_WORD,
1081 .feat_names = {
1082 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1083 NULL, NULL, NULL, NULL,
1084 NULL, NULL, "md-clear", NULL,
1085 NULL, NULL, NULL, NULL,
1086 NULL, NULL, NULL, NULL,
1087 NULL, NULL, NULL, NULL,
1088 NULL, NULL, "spec-ctrl", "stibp",
1089 NULL, "arch-capabilities", "core-capability", "ssbd",
1090 },
1091 .cpuid = {
1092 .eax = 7,
1093 .needs_ecx = true, .ecx = 0,
1094 .reg = R_EDX,
1095 },
1096 .tcg_features = TCG_7_0_EDX_FEATURES,
1097 },
1098 [FEAT_8000_0007_EDX] = {
1099 .type = CPUID_FEATURE_WORD,
1100 .feat_names = {
1101 NULL, NULL, NULL, NULL,
1102 NULL, NULL, NULL, NULL,
1103 "invtsc", NULL, NULL, NULL,
1104 NULL, NULL, NULL, NULL,
1105 NULL, NULL, NULL, NULL,
1106 NULL, NULL, NULL, NULL,
1107 NULL, NULL, NULL, NULL,
1108 NULL, NULL, NULL, NULL,
1109 },
1110 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1111 .tcg_features = TCG_APM_FEATURES,
1112 .unmigratable_flags = CPUID_APM_INVTSC,
1113 },
1114 [FEAT_8000_0008_EBX] = {
1115 .type = CPUID_FEATURE_WORD,
1116 .feat_names = {
1117 NULL, NULL, NULL, NULL,
1118 NULL, NULL, NULL, NULL,
1119 NULL, "wbnoinvd", NULL, NULL,
1120 "ibpb", NULL, NULL, NULL,
1121 NULL, NULL, NULL, NULL,
1122 NULL, NULL, NULL, NULL,
1123 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1124 NULL, NULL, NULL, NULL,
1125 },
1126 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1127 .tcg_features = 0,
1128 .unmigratable_flags = 0,
1129 },
1130 [FEAT_XSAVE] = {
1131 .type = CPUID_FEATURE_WORD,
1132 .feat_names = {
1133 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1134 NULL, NULL, NULL, NULL,
1135 NULL, NULL, NULL, NULL,
1136 NULL, NULL, NULL, NULL,
1137 NULL, NULL, NULL, NULL,
1138 NULL, NULL, NULL, NULL,
1139 NULL, NULL, NULL, NULL,
1140 NULL, NULL, NULL, NULL,
1141 },
1142 .cpuid = {
1143 .eax = 0xd,
1144 .needs_ecx = true, .ecx = 1,
1145 .reg = R_EAX,
1146 },
1147 .tcg_features = TCG_XSAVE_FEATURES,
1148 },
1149 [FEAT_6_EAX] = {
1150 .type = CPUID_FEATURE_WORD,
1151 .feat_names = {
1152 NULL, NULL, "arat", NULL,
1153 NULL, NULL, NULL, NULL,
1154 NULL, NULL, NULL, NULL,
1155 NULL, NULL, NULL, NULL,
1156 NULL, NULL, NULL, NULL,
1157 NULL, NULL, NULL, NULL,
1158 NULL, NULL, NULL, NULL,
1159 NULL, NULL, NULL, NULL,
1160 },
1161 .cpuid = { .eax = 6, .reg = R_EAX, },
1162 .tcg_features = TCG_6_EAX_FEATURES,
1163 },
1164 [FEAT_XSAVE_COMP_LO] = {
1165 .type = CPUID_FEATURE_WORD,
1166 .cpuid = {
1167 .eax = 0xD,
1168 .needs_ecx = true, .ecx = 0,
1169 .reg = R_EAX,
1170 },
1171 .tcg_features = ~0U,
1172 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1173 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1174 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1175 XSTATE_PKRU_MASK,
1176 },
1177 [FEAT_XSAVE_COMP_HI] = {
1178 .type = CPUID_FEATURE_WORD,
1179 .cpuid = {
1180 .eax = 0xD,
1181 .needs_ecx = true, .ecx = 0,
1182 .reg = R_EDX,
1183 },
1184 .tcg_features = ~0U,
1185 },
1186 /*Below are MSR exposed features*/
1187 [FEAT_ARCH_CAPABILITIES] = {
1188 .type = MSR_FEATURE_WORD,
1189 .feat_names = {
1190 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1191 "ssb-no", "mds-no", NULL, NULL,
1192 NULL, NULL, NULL, NULL,
1193 NULL, NULL, NULL, NULL,
1194 NULL, NULL, NULL, NULL,
1195 NULL, NULL, NULL, NULL,
1196 NULL, NULL, NULL, NULL,
1197 NULL, NULL, NULL, NULL,
1198 },
1199 .msr = {
1200 .index = MSR_IA32_ARCH_CAPABILITIES,
1201 .cpuid_dep = {
1202 FEAT_7_0_EDX,
1203 CPUID_7_0_EDX_ARCH_CAPABILITIES
1204 }
1205 },
1206 },
1207 [FEAT_CORE_CAPABILITY] = {
1208 .type = MSR_FEATURE_WORD,
1209 .feat_names = {
1210 NULL, NULL, NULL, NULL,
1211 NULL, "split-lock-detect", NULL, NULL,
1212 NULL, NULL, NULL, NULL,
1213 NULL, NULL, NULL, NULL,
1214 NULL, NULL, NULL, NULL,
1215 NULL, NULL, NULL, NULL,
1216 NULL, NULL, NULL, NULL,
1217 NULL, NULL, NULL, NULL,
1218 },
1219 .msr = {
1220 .index = MSR_IA32_CORE_CAPABILITY,
1221 .cpuid_dep = {
1222 FEAT_7_0_EDX,
1223 CPUID_7_0_EDX_CORE_CAPABILITY,
1224 },
1225 },
1226 },
1227 };
1228
/* Pairs an x86 32-bit register with its QAPI enum value, for the
 * x86_reg_info_32[] lookup table below. */
typedef struct X86RegisterInfo32 {
    /* Name of register (e.g. "EAX"), used for error messages */
    const char *name;
    /* Corresponding X86CPURegister32 QAPI enum value */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
1235
/* Build one x86_reg_info_32[] entry, indexed by the R_* register constant */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
/* Table mapping R_* register indexes to names and QAPI enum values */
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
1249
/* Describes one XSAVE state component: the CPUID feature that enables it
 * and its location within QEMU's X86XSaveArea buffer. */
typedef struct ExtSaveArea {
    /* feature: FeatureWord index; bits: mask in that word enabling this area */
    uint32_t feature, bits;
    /* offset/size of the component within the XSAVE area, in bytes */
    uint32_t offset, size;
} ExtSaveArea;
1254
/*
 * XSAVE state components, indexed by their XSTATE_*_BIT number.
 * Offsets and sizes are taken from QEMU's X86XSaveArea layout via
 * offsetof()/sizeof(), so they describe QEMU's buffer, not necessarily
 * the host's native XSAVE layout.
 */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    /* MPX needs both bound-register and bound-config components */
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    /* AVX-512 requires the opmask, ZMM_Hi256 and Hi16_ZMM components */
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
1299
1300 static uint32_t xsave_area_size(uint64_t mask)
1301 {
1302 int i;
1303 uint64_t ret = 0;
1304
1305 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1306 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1307 if ((mask >> i) & 1) {
1308 ret = MAX(ret, esa->offset + esa->size);
1309 }
1310 }
1311 return ret;
1312 }
1313
/* True if the active accelerator (KVM or HVF) bases guest CPUID on the
 * host CPU's CPUID, as opposed to pure emulation (TCG). */
static inline bool accel_uses_host_cpuid(void)
{
    if (kvm_enabled()) {
        return true;
    }
    return hvf_enabled();
}
1318
1319 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1320 {
1321 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1322 cpu->env.features[FEAT_XSAVE_COMP_LO];
1323 }
1324
1325 const char *get_register_name_32(unsigned int reg)
1326 {
1327 if (reg >= CPU_NB_REGS32) {
1328 return NULL;
1329 }
1330 return x86_reg_info_32[reg].name;
1331 }
1332
1333 /*
1334 * Returns the set of feature flags that are supported and migratable by
1335 * QEMU, for a given FeatureWord.
1336 */
1337 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1338 {
1339 FeatureWordInfo *wi = &feature_word_info[w];
1340 uint32_t r = 0;
1341 int i;
1342
1343 for (i = 0; i < 32; i++) {
1344 uint32_t f = 1U << i;
1345
1346 /* If the feature name is known, it is implicitly considered migratable,
1347 * unless it is explicitly set in unmigratable_flags */
1348 if ((wi->migratable_flags & f) ||
1349 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1350 r |= f;
1351 }
1352 }
1353 return r;
1354 }
1355
/*
 * Execute CPUID on the host with leaf @function and subleaf @count,
 * storing the results into *eax/*ebx/*ecx/*edx.  Any output pointer
 * may be NULL if the caller does not need that register.
 *
 * Only valid on x86 hosts; aborts when built for any other host
 * architecture.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /*
     * On 32-bit hosts, pusha/popa save and restore all general-purpose
     * registers around CPUID (notably EBX, which may serve as the PIC
     * base register), and the results are stored through %esi instead
     * of using register outputs.
     */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
1389
/*
 * Query the host CPU's vendor string (CPUID leaf 0) and
 * family/model/stepping (CPUID leaf 1, EAX).  @vendor must have room
 * for CPUID_VENDOR_SZ + 1 bytes; @family, @model and @stepping may be
 * NULL if not wanted.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family != NULL) {
        /* base family in bits 11:8, extended family in bits 27:20 */
        uint32_t base_family = (eax >> 8) & 0x0F;
        uint32_t ext_family = (eax >> 20) & 0xFF;
        *family = base_family + ext_family;
    }
    if (model != NULL) {
        /* base model in bits 7:4, extended model (bits 19:16) as high nibble */
        uint32_t base_model = (eax >> 4) & 0x0F;
        uint32_t ext_model = (eax >> 16) & 0x0F;
        *model = base_model | (ext_model << 4);
    }
    if (stepping != NULL) {
        *stepping = eax & 0x0F;
    }
}
1408
1409 /* CPU class name definitions: */
1410
1411 /* Return type name for a given CPU model name
1412 * Caller is responsible for freeing the returned string.
1413 */
/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    char *typename;

    typename = g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
    return typename;
}
1418
1419 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1420 {
1421 ObjectClass *oc;
1422 char *typename = x86_cpu_type_name(cpu_model);
1423 oc = object_class_by_name(typename);
1424 g_free(typename);
1425 return oc;
1426 }
1427
1428 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1429 {
1430 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1431 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1432 return g_strndup(class_name,
1433 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1434 }
1435
/* Static definition of one built-in CPU model (see builtin_x86_defs[]) */
struct X86CPUDefinition {
    /* CPU model name, e.g. "qemu64" */
    const char *name;
    /* Maximum basic CPUID leaf */
    uint32_t level;
    /* Maximum extended CPUID leaf (0x8000xxxx range) */
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    /* Initial feature bits, indexed by FeatureWord */
    FeatureWordArray features;
    /* Model ID string reported via CPUID 0x80000002..4 */
    const char *model_id;
    /* Optional cache topology; NULL means use legacy defaults */
    CPUCaches *cache_info;
};
1449
/* Cache topology advertised by the EPYC CPU model (see builtin_x86_defs) */
static CPUCaches epyc_cache_info = {
    /* 32KB 8-way L1 data cache */
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    /* 64KB 4-way L1 instruction cache */
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    /* 512KB 8-way unified L2 cache */
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    /* 8MB 16-way unified L3 cache */
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 8192,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};
1499
1500 static X86CPUDefinition builtin_x86_defs[] = {
1501 {
1502 .name = "qemu64",
1503 .level = 0xd,
1504 .vendor = CPUID_VENDOR_AMD,
1505 .family = 6,
1506 .model = 6,
1507 .stepping = 3,
1508 .features[FEAT_1_EDX] =
1509 PPRO_FEATURES |
1510 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1511 CPUID_PSE36,
1512 .features[FEAT_1_ECX] =
1513 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1514 .features[FEAT_8000_0001_EDX] =
1515 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1516 .features[FEAT_8000_0001_ECX] =
1517 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1518 .xlevel = 0x8000000A,
1519 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1520 },
1521 {
1522 .name = "phenom",
1523 .level = 5,
1524 .vendor = CPUID_VENDOR_AMD,
1525 .family = 16,
1526 .model = 2,
1527 .stepping = 3,
1528 /* Missing: CPUID_HT */
1529 .features[FEAT_1_EDX] =
1530 PPRO_FEATURES |
1531 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1532 CPUID_PSE36 | CPUID_VME,
1533 .features[FEAT_1_ECX] =
1534 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1535 CPUID_EXT_POPCNT,
1536 .features[FEAT_8000_0001_EDX] =
1537 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1538 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1539 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1540 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1541 CPUID_EXT3_CR8LEG,
1542 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1543 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1544 .features[FEAT_8000_0001_ECX] =
1545 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1546 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1547 /* Missing: CPUID_SVM_LBRV */
1548 .features[FEAT_SVM] =
1549 CPUID_SVM_NPT,
1550 .xlevel = 0x8000001A,
1551 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1552 },
1553 {
1554 .name = "core2duo",
1555 .level = 10,
1556 .vendor = CPUID_VENDOR_INTEL,
1557 .family = 6,
1558 .model = 15,
1559 .stepping = 11,
1560 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1561 .features[FEAT_1_EDX] =
1562 PPRO_FEATURES |
1563 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1564 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1565 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1566 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1567 .features[FEAT_1_ECX] =
1568 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1569 CPUID_EXT_CX16,
1570 .features[FEAT_8000_0001_EDX] =
1571 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1572 .features[FEAT_8000_0001_ECX] =
1573 CPUID_EXT3_LAHF_LM,
1574 .xlevel = 0x80000008,
1575 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1576 },
1577 {
1578 .name = "kvm64",
1579 .level = 0xd,
1580 .vendor = CPUID_VENDOR_INTEL,
1581 .family = 15,
1582 .model = 6,
1583 .stepping = 1,
1584 /* Missing: CPUID_HT */
1585 .features[FEAT_1_EDX] =
1586 PPRO_FEATURES | CPUID_VME |
1587 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1588 CPUID_PSE36,
1589 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1590 .features[FEAT_1_ECX] =
1591 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1592 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1593 .features[FEAT_8000_0001_EDX] =
1594 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1595 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1596 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1597 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1598 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1599 .features[FEAT_8000_0001_ECX] =
1600 0,
1601 .xlevel = 0x80000008,
1602 .model_id = "Common KVM processor"
1603 },
1604 {
1605 .name = "qemu32",
1606 .level = 4,
1607 .vendor = CPUID_VENDOR_INTEL,
1608 .family = 6,
1609 .model = 6,
1610 .stepping = 3,
1611 .features[FEAT_1_EDX] =
1612 PPRO_FEATURES,
1613 .features[FEAT_1_ECX] =
1614 CPUID_EXT_SSE3,
1615 .xlevel = 0x80000004,
1616 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1617 },
1618 {
1619 .name = "kvm32",
1620 .level = 5,
1621 .vendor = CPUID_VENDOR_INTEL,
1622 .family = 15,
1623 .model = 6,
1624 .stepping = 1,
1625 .features[FEAT_1_EDX] =
1626 PPRO_FEATURES | CPUID_VME |
1627 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1628 .features[FEAT_1_ECX] =
1629 CPUID_EXT_SSE3,
1630 .features[FEAT_8000_0001_ECX] =
1631 0,
1632 .xlevel = 0x80000008,
1633 .model_id = "Common 32-bit KVM processor"
1634 },
1635 {
1636 .name = "coreduo",
1637 .level = 10,
1638 .vendor = CPUID_VENDOR_INTEL,
1639 .family = 6,
1640 .model = 14,
1641 .stepping = 8,
1642 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1643 .features[FEAT_1_EDX] =
1644 PPRO_FEATURES | CPUID_VME |
1645 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1646 CPUID_SS,
1647 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1648 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1649 .features[FEAT_1_ECX] =
1650 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1651 .features[FEAT_8000_0001_EDX] =
1652 CPUID_EXT2_NX,
1653 .xlevel = 0x80000008,
1654 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1655 },
1656 {
1657 .name = "486",
1658 .level = 1,
1659 .vendor = CPUID_VENDOR_INTEL,
1660 .family = 4,
1661 .model = 8,
1662 .stepping = 0,
1663 .features[FEAT_1_EDX] =
1664 I486_FEATURES,
1665 .xlevel = 0,
1666 .model_id = "",
1667 },
1668 {
1669 .name = "pentium",
1670 .level = 1,
1671 .vendor = CPUID_VENDOR_INTEL,
1672 .family = 5,
1673 .model = 4,
1674 .stepping = 3,
1675 .features[FEAT_1_EDX] =
1676 PENTIUM_FEATURES,
1677 .xlevel = 0,
1678 .model_id = "",
1679 },
1680 {
1681 .name = "pentium2",
1682 .level = 2,
1683 .vendor = CPUID_VENDOR_INTEL,
1684 .family = 6,
1685 .model = 5,
1686 .stepping = 2,
1687 .features[FEAT_1_EDX] =
1688 PENTIUM2_FEATURES,
1689 .xlevel = 0,
1690 .model_id = "",
1691 },
1692 {
1693 .name = "pentium3",
1694 .level = 3,
1695 .vendor = CPUID_VENDOR_INTEL,
1696 .family = 6,
1697 .model = 7,
1698 .stepping = 3,
1699 .features[FEAT_1_EDX] =
1700 PENTIUM3_FEATURES,
1701 .xlevel = 0,
1702 .model_id = "",
1703 },
1704 {
1705 .name = "athlon",
1706 .level = 2,
1707 .vendor = CPUID_VENDOR_AMD,
1708 .family = 6,
1709 .model = 2,
1710 .stepping = 3,
1711 .features[FEAT_1_EDX] =
1712 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1713 CPUID_MCA,
1714 .features[FEAT_8000_0001_EDX] =
1715 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1716 .xlevel = 0x80000008,
1717 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1718 },
1719 {
1720 .name = "n270",
1721 .level = 10,
1722 .vendor = CPUID_VENDOR_INTEL,
1723 .family = 6,
1724 .model = 28,
1725 .stepping = 2,
1726 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1727 .features[FEAT_1_EDX] =
1728 PPRO_FEATURES |
1729 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1730 CPUID_ACPI | CPUID_SS,
1731 /* Some CPUs got no CPUID_SEP */
1732 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1733 * CPUID_EXT_XTPR */
1734 .features[FEAT_1_ECX] =
1735 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1736 CPUID_EXT_MOVBE,
1737 .features[FEAT_8000_0001_EDX] =
1738 CPUID_EXT2_NX,
1739 .features[FEAT_8000_0001_ECX] =
1740 CPUID_EXT3_LAHF_LM,
1741 .xlevel = 0x80000008,
1742 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1743 },
1744 {
1745 .name = "Conroe",
1746 .level = 10,
1747 .vendor = CPUID_VENDOR_INTEL,
1748 .family = 6,
1749 .model = 15,
1750 .stepping = 3,
1751 .features[FEAT_1_EDX] =
1752 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1753 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1754 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1755 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1756 CPUID_DE | CPUID_FP87,
1757 .features[FEAT_1_ECX] =
1758 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1759 .features[FEAT_8000_0001_EDX] =
1760 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1761 .features[FEAT_8000_0001_ECX] =
1762 CPUID_EXT3_LAHF_LM,
1763 .xlevel = 0x80000008,
1764 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1765 },
1766 {
1767 .name = "Penryn",
1768 .level = 10,
1769 .vendor = CPUID_VENDOR_INTEL,
1770 .family = 6,
1771 .model = 23,
1772 .stepping = 3,
1773 .features[FEAT_1_EDX] =
1774 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1775 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1776 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1777 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1778 CPUID_DE | CPUID_FP87,
1779 .features[FEAT_1_ECX] =
1780 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1781 CPUID_EXT_SSE3,
1782 .features[FEAT_8000_0001_EDX] =
1783 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1784 .features[FEAT_8000_0001_ECX] =
1785 CPUID_EXT3_LAHF_LM,
1786 .xlevel = 0x80000008,
1787 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1788 },
1789 {
1790 .name = "Nehalem",
1791 .level = 11,
1792 .vendor = CPUID_VENDOR_INTEL,
1793 .family = 6,
1794 .model = 26,
1795 .stepping = 3,
1796 .features[FEAT_1_EDX] =
1797 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1798 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1799 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1800 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1801 CPUID_DE | CPUID_FP87,
1802 .features[FEAT_1_ECX] =
1803 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1804 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1805 .features[FEAT_8000_0001_EDX] =
1806 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1807 .features[FEAT_8000_0001_ECX] =
1808 CPUID_EXT3_LAHF_LM,
1809 .xlevel = 0x80000008,
1810 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1811 },
1812 {
1813 .name = "Nehalem-IBRS",
1814 .level = 11,
1815 .vendor = CPUID_VENDOR_INTEL,
1816 .family = 6,
1817 .model = 26,
1818 .stepping = 3,
1819 .features[FEAT_1_EDX] =
1820 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1821 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1822 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1823 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1824 CPUID_DE | CPUID_FP87,
1825 .features[FEAT_1_ECX] =
1826 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1827 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1828 .features[FEAT_7_0_EDX] =
1829 CPUID_7_0_EDX_SPEC_CTRL,
1830 .features[FEAT_8000_0001_EDX] =
1831 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1832 .features[FEAT_8000_0001_ECX] =
1833 CPUID_EXT3_LAHF_LM,
1834 .xlevel = 0x80000008,
1835 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1836 },
1837 {
1838 .name = "Westmere",
1839 .level = 11,
1840 .vendor = CPUID_VENDOR_INTEL,
1841 .family = 6,
1842 .model = 44,
1843 .stepping = 1,
1844 .features[FEAT_1_EDX] =
1845 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1846 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1847 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1848 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1849 CPUID_DE | CPUID_FP87,
1850 .features[FEAT_1_ECX] =
1851 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1852 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1853 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1854 .features[FEAT_8000_0001_EDX] =
1855 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1856 .features[FEAT_8000_0001_ECX] =
1857 CPUID_EXT3_LAHF_LM,
1858 .features[FEAT_6_EAX] =
1859 CPUID_6_EAX_ARAT,
1860 .xlevel = 0x80000008,
1861 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1862 },
1863 {
1864 .name = "Westmere-IBRS",
1865 .level = 11,
1866 .vendor = CPUID_VENDOR_INTEL,
1867 .family = 6,
1868 .model = 44,
1869 .stepping = 1,
1870 .features[FEAT_1_EDX] =
1871 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1872 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1873 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1874 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1875 CPUID_DE | CPUID_FP87,
1876 .features[FEAT_1_ECX] =
1877 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1878 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1879 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1880 .features[FEAT_8000_0001_EDX] =
1881 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1882 .features[FEAT_8000_0001_ECX] =
1883 CPUID_EXT3_LAHF_LM,
1884 .features[FEAT_7_0_EDX] =
1885 CPUID_7_0_EDX_SPEC_CTRL,
1886 .features[FEAT_6_EAX] =
1887 CPUID_6_EAX_ARAT,
1888 .xlevel = 0x80000008,
1889 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1890 },
    {
        /*
         * Sandy Bridge (family 6, model 42).  First model in this table
         * with AVX, XSAVE/XSAVEOPT, x2APIC and the TSC-deadline timer.
         */
        .name = "SandyBridge",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 42,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E312xx (Sandy Bridge)",
    },
    {
        /*
         * SandyBridge plus CPUID.(EAX=7,ECX=0):EDX.SPEC_CTRL (IBRS/IBPB)
         * for Spectre v2 mitigation; otherwise identical to SandyBridge.
         */
        .name = "SandyBridge-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 42,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        /* The IBRS delta relative to SandyBridge. */
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
    },
    {
        /*
         * Ivy Bridge (family 6, model 58).  SandyBridge plus F16C and
         * RDRAND in leaf 1 ECX, and FSGSBASE/SMEP/ERMS in leaf 7 EBX.
         */
        .name = "IvyBridge",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 58,
        .stepping = 9,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_ERMS,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
    },
    {
        /*
         * IvyBridge plus CPUID.(EAX=7,ECX=0):EDX.SPEC_CTRL (IBRS/IBPB);
         * otherwise identical to IvyBridge.
         */
        .name = "IvyBridge-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 58,
        .stepping = 9,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_ERMS,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        /* The IBRS delta relative to IvyBridge. */
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
    },
    {
        /*
         * Haswell with TSX (HLE/RTM) removed from leaf 7 EBX, for hosts
         * where TSX is absent or disabled by microcode.  Uses stepping 1
         * (plain Haswell uses stepping 4) so guests can tell them apart.
         */
        .name = "Haswell-noTSX",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        /* Same as Haswell but without CPUID_7_0_EBX_HLE/_RTM. */
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, no TSX)",
    },
    {
        /*
         * Haswell-noTSX plus CPUID.(EAX=7,ECX=0):EDX.SPEC_CTRL
         * (IBRS/IBPB); otherwise identical to Haswell-noTSX.
         */
        .name = "Haswell-noTSX-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        /* The IBRS delta relative to Haswell-noTSX. */
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
    },
    {
        /*
         * Haswell (family 6, model 60, stepping 4).  IvyBridge plus FMA,
         * MOVBE, PCID, ABM, and leaf 7 EBX BMI1/2, AVX2, HLE/RTM (TSX),
         * INVPCID.
         */
        .name = "Haswell",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 4,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell)",
    },
    {
        /*
         * Haswell plus CPUID.(EAX=7,ECX=0):EDX.SPEC_CTRL (IBRS/IBPB);
         * otherwise identical to Haswell.
         */
        .name = "Haswell-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 4,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        /* The IBRS delta relative to Haswell. */
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, IBRS)",
    },
    {
        /*
         * Broadwell with TSX (HLE/RTM) removed from leaf 7 EBX.
         * Relative to Haswell-noTSX: adds 3DNOWPREFETCH, RDSEED, ADX
         * and SMAP.
         */
        .name = "Broadwell-noTSX",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        /* Same as Broadwell but without CPUID_7_0_EBX_HLE/_RTM. */
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, no TSX)",
    },
    {
        /*
         * Broadwell-noTSX plus CPUID.(EAX=7,ECX=0):EDX.SPEC_CTRL
         * (IBRS/IBPB); otherwise identical to Broadwell-noTSX.
         */
        .name = "Broadwell-noTSX-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        /* The IBRS delta relative to Broadwell-noTSX. */
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
    },
    {
        /*
         * Broadwell (family 6, model 61).  Haswell plus 3DNOWPREFETCH,
         * RDSEED, ADX and SMAP; keeps TSX (HLE/RTM).
         */
        .name = "Broadwell",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell)",
    },
    {
        /*
         * Broadwell plus CPUID.(EAX=7,ECX=0):EDX.SPEC_CTRL (IBRS/IBPB);
         * otherwise identical to Broadwell.
         */
        .name = "Broadwell-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        /* The IBRS delta relative to Broadwell. */
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, IBRS)",
    },
    {
        /*
         * Skylake client (family 6, model 94).  Broadwell plus XSAVEC
         * and XGETBV1 in the XSAVE leaf; keeps TSX (HLE/RTM).
         */
        .name = "Skylake-Client",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 94,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Skylake)",
    },
    {
        /*
         * Skylake-Client plus CPUID.(EAX=7,ECX=0):EDX.SPEC_CTRL
         * (IBRS/IBPB); otherwise identical to Skylake-Client.
         */
        .name = "Skylake-Client-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 94,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        /* The IBRS delta relative to Skylake-Client. */
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Skylake, IBRS)",
    },
    {
        /*
         * Skylake-SP / Skylake server (family 6, model 85).  Adds 1GB
         * pages (PDPE1GB), CLWB, CLFLUSHOPT, the server AVX-512 set
         * (F/DQ/BW/CD/VL) and PKU on top of the Skylake client features.
         */
        .name = "Skylake-Server",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 85,
        .stepping = 4,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_PKU,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Skylake)",
    },
    {
        /*
         * Skylake-Server plus CPUID.(EAX=7,ECX=0):EDX.SPEC_CTRL
         * (IBRS/IBPB).
         *
         * NOTE(review): unlike Skylake-Server (which advertises
         * CPUID_7_0_EBX_CLFLUSHOPT), this variant's FEAT_7_0_EBX ends at
         * AVX512VL.  Presumably the feature set was frozen when the model
         * was released to preserve guest ABI / live-migration
         * compatibility — confirm this omission is intentional before
         * "fixing" it, since changing a released model's CPUID bits
         * breaks migration.
         */
        .name = "Skylake-Server-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 85,
        .stepping = 4,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        /* The IBRS delta relative to Skylake-Server. */
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_PKU,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Skylake, IBRS)",
    },
    {
        /*
         * Cascade Lake server (family 6, model 85, stepping 6 — same
         * model as Skylake-SP, distinguished by stepping).  Relative to
         * Skylake-Server: adds AVX512VNNI and advertises SPEC_CTRL plus
         * SSBD directly, so no separate "-IBRS" variant exists.
         */
        .name = "Cascadelake-Server",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 85,
        .stepping = 6,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_PKU |
            CPUID_7_0_ECX_AVX512VNNI,
        /* Speculation controls are built into this model. */
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Cascadelake)",
    },
    {
        /*
         * Ice Lake client (family 6, model 126).  Adds WBNOINVD and the
         * leaf 7 ECX crypto/vector extensions (VBMI/VBMI2, GFNI, VAES,
         * VPCLMULQDQ, AVX512VNNI, AVX512BITALG, AVX512_VPOPCNTDQ) plus
         * UMIP and PKU; SPEC_CTRL and SSBD are built in.  Unlike the
         * server model there is no PDPE1GB, CLWB/CLFLUSHOPT, or leaf 7
         * EBX AVX-512 set here.
         */
        .name = "Icelake-Client",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 126,
        .stepping = 0,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_8000_0008_EBX] =
            CPUID_8000_0008_EBX_WBNOINVD,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
            CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
            CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
            CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
            CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Icelake)",
    },
    {
        /*
         * Ice Lake server.  Icelake-Client plus PDPE1GB, CLWB,
         * CLFLUSHOPT, the leaf 7 EBX AVX-512 set (F/DQ/BW/CD/VL) and
         * 5-level paging (LA57).
         *
         * NOTE(review): .model = 134 (0x86) here is the same model
         * number the SnowRidge-Server entry below uses, and shipping
         * Icelake-SP parts report model 106 (0x6A) — confirm whether
         * this value is intentional (changing it on a released model
         * would break live migration; a versioned model would be the
         * usual remedy).
         */
        .name = "Icelake-Server",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 134,
        .stepping = 0,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_8000_0008_EBX] =
            CPUID_8000_0008_EBX_WBNOINVD,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
            CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
            CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
            CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
            CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Icelake)",
    },
2691 {
2692 .name = "SnowRidge-Server",
2693 .level = 27,
2694 .vendor = CPUID_VENDOR_INTEL,
2695 .family = 6,
2696 .model = 134,
2697 .stepping = 1,
2698 .features[FEAT_1_EDX] =
2699 /* missing: CPUID_PN CPUID_IA64 */
2700 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2701 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE |
2702 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE |
2703 CPUID_CX8 | CPUID_APIC | CPUID_SEP |
2704 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
2705 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH |
2706 CPUID_MMX |
2707 CPUID_FXSR | CPUID_SSE | CPUID_SSE2,
2708 .features[FEAT_1_ECX] =
2709 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
2710 CPUID_EXT_VMX |
2711 CPUID_EXT_SSSE3 |
2712 CPUID_EXT_CX16 |
2713 CPUID_EXT_SSE41 |
2714 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
2715 CPUID_EXT_POPCNT |
2716 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE |
2717 CPUID_EXT_RDRAND,
2718 .features[FEAT_8000_0001_EDX] =
2719 CPUID_EXT2_SYSCALL |
2720 CPUID_EXT2_NX |
2721 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2722 CPUID_EXT2_LM,
2723 .features[FEAT_8000_0001_ECX] =
2724 CPUID_EXT3_LAHF_LM |
2725 CPUID_EXT3_3DNOWPREFETCH,
2726 .features[FEAT_7_0_EBX] =
2727 CPUID_7_0_EBX_FSGSBASE |
2728 CPUID_7_0_EBX_SMEP |
2729 CPUID_7_0_EBX_ERMS |
2730 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */
2731 CPUID_7_0_EBX_RDSEED |
2732 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2733 CPUID_7_0_EBX_CLWB |
2734 CPUID_7_0_EBX_SHA_NI,
2735 .features[FEAT_7_0_ECX] =
2736 CPUID_7_0_ECX_UMIP |
2737 /* missing bit 5 */
2738 CPUID_7_0_ECX_GFNI |
2739 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE |
2740 CPUID_7_0_ECX_MOVDIR64B,
2741 .features[FEAT_7_0_EDX] =
2742 CPUID_7_0_EDX_SPEC_CTRL |
2743 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD |
2744 CPUID_7_0_EDX_CORE_CAPABILITY,
2745 .features[FEAT_CORE_CAPABILITY] =
2746 MSR_CORE_CAP_SPLIT_LOCK_DETECT,
2747 /*
2748 * Missing: XSAVES (not supported by some Linux versions,
2749 * including v4.1 to v4.12).
2750 * KVM doesn't yet expose any XSAVES state save component,
2751 * and the only one defined in Skylake (processor tracing)
2752 * probably will block migration anyway.
2753 */
2754 .features[FEAT_XSAVE] =
2755 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2756 CPUID_XSAVE_XGETBV1,
2757 .features[FEAT_6_EAX] =
2758 CPUID_6_EAX_ARAT,
2759 .xlevel = 0x80000008,
2760 .model_id = "Intel Atom Processor (SnowRidge)",
2761 },
2762 {
2763 .name = "KnightsMill",
2764 .level = 0xd,
2765 .vendor = CPUID_VENDOR_INTEL,
2766 .family = 6,
2767 .model = 133,
2768 .stepping = 0,
2769 .features[FEAT_1_EDX] =
2770 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2771 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2772 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2773 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2774 CPUID_PSE | CPUID_DE | CPUID_FP87,
2775 .features[FEAT_1_ECX] =
2776 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2777 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2778 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2779 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2780 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2781 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2782 .features[FEAT_8000_0001_EDX] =
2783 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2784 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2785 .features[FEAT_8000_0001_ECX] =
2786 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2787 .features[FEAT_7_0_EBX] =
2788 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2789 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2790 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2791 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2792 CPUID_7_0_EBX_AVX512ER,
2793 .features[FEAT_7_0_ECX] =
2794 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2795 .features[FEAT_7_0_EDX] =
2796 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2797 .features[FEAT_XSAVE] =
2798 CPUID_XSAVE_XSAVEOPT,
2799 .features[FEAT_6_EAX] =
2800 CPUID_6_EAX_ARAT,
2801 .xlevel = 0x80000008,
2802 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2803 },
2804 {
2805 .name = "Opteron_G1",
2806 .level = 5,
2807 .vendor = CPUID_VENDOR_AMD,
2808 .family = 15,
2809 .model = 6,
2810 .stepping = 1,
2811 .features[FEAT_1_EDX] =
2812 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2813 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2814 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2815 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2816 CPUID_DE | CPUID_FP87,
2817 .features[FEAT_1_ECX] =
2818 CPUID_EXT_SSE3,
2819 .features[FEAT_8000_0001_EDX] =
2820 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2821 .xlevel = 0x80000008,
2822 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2823 },
2824 {
2825 .name = "Opteron_G2",
2826 .level = 5,
2827 .vendor = CPUID_VENDOR_AMD,
2828 .family = 15,
2829 .model = 6,
2830 .stepping = 1,
2831 .features[FEAT_1_EDX] =
2832 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2833 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2834 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2835 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2836 CPUID_DE | CPUID_FP87,
2837 .features[FEAT_1_ECX] =
2838 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2839 .features[FEAT_8000_0001_EDX] =
2840 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2841 .features[FEAT_8000_0001_ECX] =
2842 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2843 .xlevel = 0x80000008,
2844 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2845 },
2846 {
2847 .name = "Opteron_G3",
2848 .level = 5,
2849 .vendor = CPUID_VENDOR_AMD,
2850 .family = 16,
2851 .model = 2,
2852 .stepping = 3,
2853 .features[FEAT_1_EDX] =
2854 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2855 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2856 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2857 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2858 CPUID_DE | CPUID_FP87,
2859 .features[FEAT_1_ECX] =
2860 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2861 CPUID_EXT_SSE3,
2862 .features[FEAT_8000_0001_EDX] =
2863 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
2864 CPUID_EXT2_RDTSCP,
2865 .features[FEAT_8000_0001_ECX] =
2866 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2867 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2868 .xlevel = 0x80000008,
2869 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2870 },
2871 {
2872 .name = "Opteron_G4",
2873 .level = 0xd,
2874 .vendor = CPUID_VENDOR_AMD,
2875 .family = 21,
2876 .model = 1,
2877 .stepping = 2,
2878 .features[FEAT_1_EDX] =
2879 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2880 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2881 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2882 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2883 CPUID_DE | CPUID_FP87,
2884 .features[FEAT_1_ECX] =
2885 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2886 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2887 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2888 CPUID_EXT_SSE3,
2889 .features[FEAT_8000_0001_EDX] =
2890 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2891 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2892 .features[FEAT_8000_0001_ECX] =
2893 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2894 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2895 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2896 CPUID_EXT3_LAHF_LM,
2897 .features[FEAT_SVM] =
2898 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2899 /* no xsaveopt! */
2900 .xlevel = 0x8000001A,
2901 .model_id = "AMD Opteron 62xx class CPU",
2902 },
2903 {
2904 .name = "Opteron_G5",
2905 .level = 0xd,
2906 .vendor = CPUID_VENDOR_AMD,
2907 .family = 21,
2908 .model = 2,
2909 .stepping = 0,
2910 .features[FEAT_1_EDX] =
2911 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2912 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2913 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2914 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2915 CPUID_DE | CPUID_FP87,
2916 .features[FEAT_1_ECX] =
2917 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2918 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2919 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2920 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2921 .features[FEAT_8000_0001_EDX] =
2922 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2923 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2924 .features[FEAT_8000_0001_ECX] =
2925 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2926 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2927 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2928 CPUID_EXT3_LAHF_LM,
2929 .features[FEAT_SVM] =
2930 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2931 /* no xsaveopt! */
2932 .xlevel = 0x8000001A,
2933 .model_id = "AMD Opteron 63xx class CPU",
2934 },
2935 {
2936 .name = "EPYC",
2937 .level = 0xd,
2938 .vendor = CPUID_VENDOR_AMD,
2939 .family = 23,
2940 .model = 1,
2941 .stepping = 2,
2942 .features[FEAT_1_EDX] =
2943 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2944 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2945 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2946 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2947 CPUID_VME | CPUID_FP87,
2948 .features[FEAT_1_ECX] =
2949 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2950 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2951 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2952 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2953 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2954 .features[FEAT_8000_0001_EDX] =
2955 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2956 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2957 CPUID_EXT2_SYSCALL,
2958 .features[FEAT_8000_0001_ECX] =
2959 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2960 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2961 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2962 CPUID_EXT3_TOPOEXT,
2963 .features[FEAT_7_0_EBX] =
2964 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2965 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2966 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2967 CPUID_7_0_EBX_SHA_NI,
2968 /* Missing: XSAVES (not supported by some Linux versions,
2969 * including v4.1 to v4.12).
2970 * KVM doesn't yet expose any XSAVES state save component.
2971 */
2972 .features[FEAT_XSAVE] =
2973 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2974 CPUID_XSAVE_XGETBV1,
2975 .features[FEAT_6_EAX] =
2976 CPUID_6_EAX_ARAT,
2977 .features[FEAT_SVM] =
2978 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2979 .xlevel = 0x8000001E,
2980 .model_id = "AMD EPYC Processor",
2981 .cache_info = &epyc_cache_info,
2982 },
2983 {
2984 .name = "EPYC-IBPB",
2985 .level = 0xd,
2986 .vendor = CPUID_VENDOR_AMD,
2987 .family = 23,
2988 .model = 1,
2989 .stepping = 2,
2990 .features[FEAT_1_EDX] =
2991 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2992 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2993 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2994 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2995 CPUID_VME | CPUID_FP87,
2996 .features[FEAT_1_ECX] =
2997 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2998 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2999 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3000 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3001 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
3002 .features[FEAT_8000_0001_EDX] =
3003 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3004 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3005 CPUID_EXT2_SYSCALL,
3006 .features[FEAT_8000_0001_ECX] =
3007 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3008 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3009 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3010 CPUID_EXT3_TOPOEXT,
3011 .features[FEAT_8000_0008_EBX] =
3012 CPUID_8000_0008_EBX_IBPB,
3013 .features[FEAT_7_0_EBX] =
3014 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3015 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3016 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3017 CPUID_7_0_EBX_SHA_NI,
3018 /* Missing: XSAVES (not supported by some Linux versions,
3019 * including v4.1 to v4.12).
3020 * KVM doesn't yet expose any XSAVES state save component.
3021 */
3022 .features[FEAT_XSAVE] =
3023 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3024 CPUID_XSAVE_XGETBV1,
3025 .features[FEAT_6_EAX] =
3026 CPUID_6_EAX_ARAT,
3027 .features[FEAT_SVM] =
3028 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3029 .xlevel = 0x8000001E,
3030 .model_id = "AMD EPYC Processor (with IBPB)",
3031 .cache_info = &epyc_cache_info,
3032 },
3033 {
3034 .name = "Dhyana",
3035 .level = 0xd,
3036 .vendor = CPUID_VENDOR_HYGON,
3037 .family = 24,
3038 .model = 0,
3039 .stepping = 1,
3040 .features[FEAT_1_EDX] =
3041 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3042 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
3043 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
3044 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
3045 CPUID_VME | CPUID_FP87,
3046 .features[FEAT_1_ECX] =
3047 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
3048 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
3049 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3050 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3051 CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
3052 .features[FEAT_8000_0001_EDX] =
3053 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3054 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3055 CPUID_EXT2_SYSCALL,
3056 .features[FEAT_8000_0001_ECX] =
3057 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3058 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3059 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3060 CPUID_EXT3_TOPOEXT,
3061 .features[FEAT_8000_0008_EBX] =
3062 CPUID_8000_0008_EBX_IBPB,
3063 .features[FEAT_7_0_EBX] =
3064 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3065 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3066 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
3067 /*
3068 * Missing: XSAVES (not supported by some Linux versions,
3069 * including v4.1 to v4.12).
3070 * KVM doesn't yet expose any XSAVES state save component.
3071 */
3072 .features[FEAT_XSAVE] =
3073 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3074 CPUID_XSAVE_XGETBV1,
3075 .features[FEAT_6_EAX] =
3076 CPUID_6_EAX_ARAT,
3077 .features[FEAT_SVM] =
3078 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3079 .xlevel = 0x8000001E,
3080 .model_id = "Hygon Dhyana Processor",
3081 .cache_info = &epyc_cache_info,
3082 },
3083 };
3084
/* Name/value pair for a QOM property; used for the per-accelerator
 * default-property tables below.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
3088
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * Entries can be overridden at runtime via x86_cpu_change_kvm_default().
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },             /* terminator */
};
3105
/* TCG-specific defaults that override all CPU models when using TCG
 */
static PropValue tcg_default_props[] = {
    /* NOTE(review): presumably disabled because TCG's VME emulation is
     * incomplete — confirm against TCG feature support.
     */
    { "vme", "off" },
    { NULL, NULL },             /* terminator */
};
3112
3113
3114 void x86_cpu_change_kvm_default(const char *prop, const char *value)
3115 {
3116 PropValue *pv;
3117 for (pv = kvm_default_props; pv->prop; pv++) {
3118 if (!strcmp(pv->prop, prop)) {
3119 pv->value = value;
3120 break;
3121 }
3122 }
3123
3124 /* It is valid to call this function only for properties that
3125 * are already present in the kvm_default_props table.
3126 */
3127 assert(pv->prop);
3128 }
3129
3130 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3131 bool migratable_only);
3132
3133 static bool lmce_supported(void)
3134 {
3135 uint64_t mce_cap = 0;
3136
3137 #ifdef CONFIG_KVM
3138 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
3139 return false;
3140 }
3141 #endif
3142
3143 return !!(mce_cap & MCG_LMCE_P);
3144 }
3145
3146 #define CPUID_MODEL_ID_SZ 48
3147
/**
 * cpu_x86_fill_model_id:
 * Read the CPUID model ID string from the host CPU.
 *
 * @str must provide at least CPUID_MODEL_ID_SZ bytes of storage.
 *
 * The function does NOT add a null terminator to the string
 * automatically; callers must terminate the buffer themselves if a
 * C string is needed.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    /* The 48-byte model ID is spread over CPUID leaves
     * 0x80000002..0x80000004, 16 bytes per leaf in EAX/EBX/ECX/EDX order.
     */
    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
3171
/* Extra QOM properties available on the "max" CPU model. */
static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
3177
3178 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
3179 {
3180 DeviceClass *dc = DEVICE_CLASS(oc);
3181 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3182
3183 xcc->ordering = 9;
3184
3185 xcc->model_description =
3186 "Enables all features supported by the accelerator in the current host";
3187
3188 dc->props = max_x86_cpu_properties;
3189 }
3190
3191 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
3192
/* Instance initializer for the "max" CPU model: configure the CPU to
 * expose as many features as the current accelerator/host supports.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;

        /* Mirror the host CPU's vendor/family/model/stepping and model ID. */
        host_vendor_fms(vendor, &family, &model, &stepping);
        cpu_x86_fill_model_id(model_id);

        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        /* Raise the minimum CPUID levels to whatever the accelerator
         * reports as supported.
         */
        if (kvm_enabled()) {
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            /* NOTE(review): this branch assumes the only other
             * host-cpuid-using accelerator is HVF — confirm if more are
             * ever added.
             */
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* No host CPUID available (e.g. TCG): use a fixed generic
         * identification instead of the host's.
         */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
3252
/* QOM type registration for the "max" CPU model. */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
3259
3260 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
/* Class initializer for the "host" CPU model (KVM/HVF only). */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* "host" only makes sense when the accelerator can report host CPUID. */
    xcc->host_cpuid_required = true;
    xcc->ordering = 8;

#if defined(CONFIG_KVM)
    xcc->model_description =
        "KVM processor with all supported host features ";
#elif defined(CONFIG_HVF)
    xcc->model_description =
        "HVF processor with all supported host features ";
#endif
}
3276
/* QOM type registration for the "host" CPU model; inherits from "max". */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
3282
3283 #endif
3284
3285 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
3286 {
3287 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
3288
3289 switch (f->type) {
3290 case CPUID_FEATURE_WORD:
3291 {
3292 const char *reg = get_register_name_32(f->cpuid.reg);
3293 assert(reg);
3294 return g_strdup_printf("CPUID.%02XH:%s",
3295 f->cpuid.eax, reg);
3296 }
3297 case MSR_FEATURE_WORD:
3298 return g_strdup_printf("MSR(%02XH)",
3299 f->msr.index);
3300 }
3301
3302 return NULL;
3303 }
3304
3305 static void report_unavailable_features(FeatureWord w, uint32_t mask)
3306 {
3307 FeatureWordInfo *f = &feature_word_info[w];
3308 int i;
3309 char *feat_word_str;
3310
3311 for (i = 0; i < 32; ++i) {
3312 if ((1UL << i) & mask) {
3313 feat_word_str = feature_word_description(f, i);
3314 warn_report("%s doesn't support requested feature: %s%s%s [bit %d]",
3315 accel_uses_host_cpuid() ? "host" : "TCG",
3316 feat_word_str,
3317 f->feat_names[i] ? "." : "",
3318 f->feat_names[i] ? f->feat_names[i] : "", i);
3319 g_free(feat_word_str);
3320 }
3321 }
3322 }
3323
3324 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
3325 const char *name, void *opaque,
3326 Error **errp)
3327 {
3328 X86CPU *cpu = X86_CPU(obj);
3329 CPUX86State *env = &cpu->env;
3330 int64_t value;
3331
3332 value = (env->cpuid_version >> 8) & 0xf;
3333 if (value == 0xf) {
3334 value += (env->cpuid_version >> 20) & 0xff;
3335 }
3336 visit_type_int(v, name, &value, errp);
3337 }
3338
3339 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
3340 const char *name, void *opaque,
3341 Error **errp)
3342 {
3343 X86CPU *cpu = X86_CPU(obj);
3344 CPUX86State *env = &cpu->env;
3345 const int64_t min = 0;
3346 const int64_t max = 0xff + 0xf;
3347 Error *local_err = NULL;
3348 int64_t value;
3349
3350 visit_type_int(v, name, &value, &local_err);
3351 if (local_err) {
3352 error_propagate(errp, local_err);
3353 return;
3354 }
3355 if (value < min || value > max) {
3356 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3357 name ? name : "null", value, min, max);
3358 return;
3359 }
3360
3361 env->cpuid_version &= ~0xff00f00;
3362 if (value > 0x0f) {
3363 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
3364 } else {
3365 env->cpuid_version |= value << 8;
3366 }
3367 }
3368
3369 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
3370 const char *name, void *opaque,
3371 Error **errp)
3372 {
3373 X86CPU *cpu = X86_CPU(obj);
3374 CPUX86State *env = &cpu->env;
3375 int64_t value;
3376
3377 value = (env->cpuid_version >> 4) & 0xf;
3378 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
3379 visit_type_int(v, name, &value, errp);
3380 }
3381
3382 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
3383 const char *name, void *opaque,
3384 Error **errp)
3385 {
3386 X86CPU *cpu = X86_CPU(obj);
3387 CPUX86State *env = &cpu->env;
3388 const int64_t min = 0;
3389 const int64_t max = 0xff;
3390 Error *local_err = NULL;
3391 int64_t value;
3392
3393 visit_type_int(v, name, &value, &local_err);
3394 if (local_err) {
3395 error_propagate(errp, local_err);
3396 return;
3397 }
3398 if (value < min || value > max) {
3399 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3400 name ? name : "null", value, min, max);
3401 return;
3402 }
3403
3404 env->cpuid_version &= ~0xf00f0;
3405 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
3406 }
3407
3408 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
3409 const char *name, void *opaque,
3410 Error **errp)
3411 {
3412 X86CPU *cpu = X86_CPU(obj);
3413 CPUX86State *env = &cpu->env;
3414 int64_t value;
3415
3416 value = env->cpuid_version & 0xf;
3417 visit_type_int(v, name, &value, errp);
3418 }
3419
3420 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
3421 const char *name, void *opaque,
3422 Error **errp)
3423 {
3424 X86CPU *cpu = X86_CPU(obj);
3425 CPUX86State *env = &cpu->env;
3426 const int64_t min = 0;
3427 const int64_t max = 0xf;
3428 Error *local_err = NULL;
3429 int64_t value;
3430
3431 visit_type_int(v, name, &value, &local_err);
3432 if (local_err) {
3433 error_propagate(errp, local_err);
3434 return;
3435 }
3436 if (value < min || value > max) {
3437 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3438 name ? name : "null", value, min, max);
3439 return;
3440 }
3441
3442 env->cpuid_version &= ~0xf;
3443 env->cpuid_version |= value & 0xf;
3444 }
3445
3446 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
3447 {
3448 X86CPU *cpu = X86_CPU(obj);
3449 CPUX86State *env = &cpu->env;
3450 char *value;
3451
3452 value = g_malloc(CPUID_VENDOR_SZ + 1);
3453 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
3454 env->cpuid_vendor3);
3455 return value;
3456 }
3457
3458 static void x86_cpuid_set_vendor(Object *obj, const char *value,
3459 Error **errp)
3460 {
3461 X86CPU *cpu = X86_CPU(obj);
3462 CPUX86State *env = &cpu->env;
3463 int i;
3464
3465 if (strlen(value) != CPUID_VENDOR_SZ) {
3466 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
3467 return;
3468 }
3469
3470 env->cpuid_vendor1 = 0;
3471 env->cpuid_vendor2 = 0;
3472 env->cpuid_vendor3 = 0;
3473 for (i = 0; i < 4; i++) {
3474 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3475 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3476 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3477 }
3478 }
3479
3480 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3481 {
3482 X86CPU *cpu = X86_CPU(obj);
3483 CPUX86State *env = &cpu->env;
3484 char *value;
3485 int i;
3486
3487 value = g_malloc(48 + 1);
3488 for (i = 0; i < 48; i++) {
3489 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3490 }
3491 value[48] = '\0';
3492 return value;
3493 }
3494
3495 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3496 Error **errp)
3497 {
3498 X86CPU *cpu = X86_CPU(obj);
3499 CPUX86State *env = &cpu->env;
3500 int c, len, i;
3501
3502 if (model_id == NULL) {
3503 model_id = "";
3504 }
3505 len = strlen(model_id);
3506 memset(env->cpuid_model, 0, 48);
3507 for (i = 0; i < 48; i++) {
3508 if (i >= len) {
3509 c = '\0';
3510 } else {
3511 c = (uint8_t)model_id[i];
3512 }
3513 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3514 }
3515 }
3516
3517 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3518 void *opaque, Error **errp)
3519 {
3520 X86CPU *cpu = X86_CPU(obj);
3521 int64_t value;
3522
3523 value = cpu->env.tsc_khz * 1000;
3524 visit_type_int(v, name, &value, errp);
3525 }
3526
3527 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3528 void *opaque, Error **errp)
3529 {
3530 X86CPU *cpu = X86_CPU(obj);
3531 const int64_t min = 0;
3532 const int64_t max = INT64_MAX;
3533 Error *local_err = NULL;
3534 int64_t value;
3535
3536 visit_type_int(v, name, &value, &local_err);
3537 if (local_err) {
3538 error_propagate(errp, local_err);
3539 return;
3540 }
3541 if (value < min || value > max) {
3542 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3543 name ? name : "null", value, min, max);
3544 return;
3545 }
3546
3547 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
3548 }
3549
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* @opaque points at the FeatureWordArray to report. */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    /* Both the info structs and the list nodes live on the stack; the
     * visitor serializes them before this function returns, so no heap
     * allocation is needed.
     */
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /*
         * We didn't have MSR features when "feature-words" was
         * introduced. Therefore skipped other type entries.
         */
        if (wi->type != CPUID_FEATURE_WORD) {
            continue;
        }
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid.eax;
        qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid.ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
3585
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = strchr(s, '_'); p != NULL; p = strchr(p + 1, '_')) {
        *p = '-';
    }
}
3595
3596 /* Return the feature property name for a feature flag bit */
3597 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
3598 {
3599 /* XSAVE components are automatically enabled by other features,
3600 * so return the original feature name instead
3601 */
3602 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
3603 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
3604
3605 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
3606 x86_ext_save_areas[comp].bits) {
3607 w = x86_ext_save_areas[comp].feature;
3608 bitnr = ctz32(x86_ext_save_areas[comp].bits);
3609 }
3610 }
3611
3612 assert(bitnr < 32);
3613 assert(w < FEATURE_WORDS);
3614 return feature_word_info[w].feat_names[bitnr];
3615 }
3616
/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;
3623
3624 static gint compare_string(gconstpointer a, gconstpointer b)
3625 {
3626 return g_strcmp0(a, b);
3627 }
3628
/* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * Registers each "feature=value" pair as a global property on
 * @typename.  "+feat"/"-feat" entries are remembered in
 * plus_features/minus_features for legacy-compatible handling.
 *
 * NOTE: uses strtok(), so @features is modified in place and the
 * function is not reentrant.  It is also guarded so that only the
 * first invocation has any effect.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single "key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    /* Only the first call registers globals; later calls are no-ops. */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "key=value"; a bare "key" means "key=on". */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Warn when the same feature appears both as +/-feat and feat=. */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" accepts metric suffixes and maps to
         * the "tsc-frequency" property.
         */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}
3718
3719 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3720 static int x86_cpu_filter_features(X86CPU *cpu);
3721
3722 /* Build a list with the name of all features on a feature word array */
3723 static void x86_cpu_list_feature_names(FeatureWordArray features,
3724 strList **feat_names)
3725 {
3726 FeatureWord w;
3727 strList **next = feat_names;
3728
3729 for (w = 0; w < FEATURE_WORDS; w++) {
3730 uint32_t filtered = features[w];
3731 int i;
3732 for (i = 0; i < 32; i++) {
3733 if (filtered & (1UL << i)) {
3734 strList *new = g_new0(strList, 1);
3735 new->value = g_strdup(x86_cpu_feature_name(w, i));
3736 *next = new;
3737 next = &new->next;
3738 }
3739 }
3740 }
3741 }
3742
/* QOM getter for "unavailable-features": the list of feature names that
 * were filtered out of this CPU by the current accelerator.
 */
static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
                                             const char *name, void *opaque,
                                             Error **errp)
{
    X86CPU *xc = X86_CPU(obj);
    strList *result = NULL;

    x86_cpu_list_feature_names(xc->filtered_features, &result);
    /* NOTE(review): result does not appear to be freed after visiting —
     * confirm whether the output visitor takes ownership or this leaks.
     */
    visit_type_strList(v, "unavailable-features", &result, errp);
}
3753
3754 /* Check for missing features that may prevent the CPU class from
3755 * running using the current machine and accelerator.
3756 */
3757 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3758 strList **missing_feats)
3759 {
3760 X86CPU *xc;
3761 Error *err = NULL;
3762 strList **next = missing_feats;
3763
3764 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3765 strList *new = g_new0(strList, 1);
3766 new->value = g_strdup("kvm");
3767 *missing_feats = new;
3768 return;
3769 }
3770
3771 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3772
3773 x86_cpu_expand_features(xc, &err);
3774 if (err) {
3775 /* Errors at x86_cpu_expand_features should never happen,
3776 * but in case it does, just report the model as not
3777 * runnable at all using the "type" property.
3778 */
3779 strList *new = g_new0(strList, 1);
3780 new->value = g_strdup("type");
3781 *next = new;
3782 next = &new->next;
3783 }
3784
3785 x86_cpu_filter_features(xc);
3786
3787 x86_cpu_list_feature_names(xc->filtered_features, next);
3788
3789 object_unref(OBJECT(xc));
3790 }
3791
3792 /* Print all cpuid feature names in featureset
3793 */
3794 static void listflags(GList *features)
3795 {
3796 size_t len = 0;
3797 GList *tmp;
3798
3799 for (tmp = features; tmp; tmp = tmp->next) {
3800 const char *name = tmp->data;
3801 if ((len + strlen(name) + 1) >= 75) {
3802 qemu_printf("\n");
3803 len = 0;
3804 }
3805 qemu_printf("%s%s", len == 0 ? " " : " ", name);
3806 len += strlen(name) + 1;
3807 }
3808 qemu_printf("\n");
3809 }
3810
3811 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3812 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3813 {
3814 ObjectClass *class_a = (ObjectClass *)a;
3815 ObjectClass *class_b = (ObjectClass *)b;
3816 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3817 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3818 char *name_a, *name_b;
3819 int ret;
3820
3821 if (cc_a->ordering != cc_b->ordering) {
3822 ret = cc_a->ordering - cc_b->ordering;
3823 } else {
3824 name_a = x86_cpu_class_get_model_name(cc_a);
3825 name_b = x86_cpu_class_get_model_name(cc_b);
3826 ret = strcmp(name_a, name_b);
3827 g_free(name_a);
3828 g_free(name_b);
3829 }
3830 return ret;
3831 }
3832
3833 static GSList *get_sorted_cpu_model_list(void)
3834 {
3835 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3836 list = g_slist_sort(list, x86_cpu_list_compare);
3837 return list;
3838 }
3839
3840 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3841 {
3842 ObjectClass *oc = data;
3843 X86CPUClass *cc = X86_CPU_CLASS(oc);
3844 char *name = x86_cpu_class_get_model_name(cc);
3845 const char *desc = cc->model_description;
3846 if (!desc && cc->cpu_def) {
3847 desc = cc->cpu_def->model_id;
3848 }
3849
3850 qemu_printf("x86 %-20s %-48s\n", name, desc);
3851 g_free(name);
3852 }
3853
3854 /* list available CPU models and flags */
3855 void x86_cpu_list(void)
3856 {
3857 int i, j;
3858 GSList *list;
3859 GList *names = NULL;
3860
3861 qemu_printf("Available CPUs:\n");
3862 list = get_sorted_cpu_model_list();
3863 g_slist_foreach(list, x86_cpu_list_entry, NULL);
3864 g_slist_free(list);
3865
3866 names = NULL;
3867 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3868 FeatureWordInfo *fw = &feature_word_info[i];
3869 for (j = 0; j < 32; j++) {
3870 if (fw->feat_names[j]) {
3871 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3872 }
3873 }
3874 }
3875
3876 names = g_list_sort(names, (GCompareFunc)strcmp);
3877
3878 qemu_printf("\nRecognized CPUID flags:\n");
3879 listflags(names);
3880 qemu_printf("\n");
3881 g_list_free(names);
3882 }
3883
3884 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3885 {
3886 ObjectClass *oc = data;
3887 X86CPUClass *cc = X86_CPU_CLASS(oc);
3888 CpuDefinitionInfoList **cpu_list = user_data;
3889 CpuDefinitionInfoList *entry;
3890 CpuDefinitionInfo *info;
3891
3892 info = g_malloc0(sizeof(*info));
3893 info->name = x86_cpu_class_get_model_name(cc);
3894 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3895 info->has_unavailable_features = true;
3896 info->q_typename = g_strdup(object_class_get_name(oc));
3897 info->migration_safe = cc->migration_safe;
3898 info->has_migration_safe = true;
3899 info->q_static = cc->static_model;
3900
3901 entry = g_malloc0(sizeof(*entry));
3902 entry->value = info;
3903 entry->next = *cpu_list;
3904 *cpu_list = entry;
3905 }
3906
3907 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
3908 {
3909 CpuDefinitionInfoList *cpu_list = NULL;
3910 GSList *list = get_sorted_cpu_model_list();
3911 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3912 g_slist_free(list);
3913 return cpu_list;
3914 }
3915
/* Return the bits of feature word @w that the active accelerator supports.
 *
 * With no accelerator restrictions (e.g. qtest), all bits are reported
 * as supported.  If @migratable_only is true, bits that would block
 * migration are masked out.
 */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;

    if (kvm_enabled()) {
        /* Query KVM via CPUID or MSR, depending on the word's definition */
        switch (wi->type) {
        case CPUID_FEATURE_WORD:
            r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
                                             wi->cpuid.ecx,
                                             wi->cpuid.reg);
            break;
        case MSR_FEATURE_WORD:
            r = kvm_arch_get_supported_msr_feature(kvm_state,
                                                   wi->msr.index);
            break;
        }
    } else if (hvf_enabled()) {
        /* HVF only reports CPUID-based feature words */
        if (wi->type != CPUID_FEATURE_WORD) {
            return 0;
        }
        r = hvf_get_supported_cpuid(wi->cpuid.eax,
                                    wi->cpuid.ecx,
                                    wi->cpuid.reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        /* No accelerator active: no filtering */
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
3951
3952 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3953 {
3954 FeatureWord w;
3955
3956 for (w = 0; w < FEATURE_WORDS; w++) {
3957 report_unavailable_features(w, cpu->filtered_features[w]);
3958 }
3959 }
3960
3961 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3962 {
3963 PropValue *pv;
3964 for (pv = props; pv->prop; pv++) {
3965 if (!pv->value) {
3966 continue;
3967 }
3968 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3969 &error_abort);
3970 }
3971 }
3972
/* Load data from X86CPUDefinition into a X86CPU object
 *
 * Copies the static model definition @def into @cpu's QOM properties
 * and feature words, then applies accelerator-specific defaults and
 * the vendor override.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    /* Copy all feature words verbatim from the model definition */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            /* Userspace irqchip: default x2apic off */
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Always advertise that the guest runs under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        /* With KVM/HVF, default to the host's real vendor string */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
4034
4035 #ifndef CONFIG_USER_ONLY
4036 /* Return a QDict containing keys for all properties that can be included
4037 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
4038 * must be included in the dictionary.
4039 */
4040 static QDict *x86_cpu_static_props(void)
4041 {
4042 FeatureWord w;
4043 int i;
4044 static const char *props[] = {
4045 "min-level",
4046 "min-xlevel",
4047 "family",
4048 "model",
4049 "stepping",
4050 "model-id",
4051 "vendor",
4052 "lmce",
4053 NULL,
4054 };
4055 static QDict *d;
4056
4057 if (d) {
4058 return d;
4059 }
4060
4061 d = qdict_new();
4062 for (i = 0; props[i]; i++) {
4063 qdict_put_null(d, props[i]);
4064 }
4065
4066 for (w = 0; w < FEATURE_WORDS; w++) {
4067 FeatureWordInfo *fi = &feature_word_info[w];
4068 int bit;
4069 for (bit = 0; bit < 32; bit++) {
4070 if (!fi->feat_names[bit]) {
4071 continue;
4072 }
4073 qdict_put_null(d, fi->feat_names[bit]);
4074 }
4075 }
4076
4077 return d;
4078 }
4079
4080 /* Add an entry to @props dict, with the value for property. */
4081 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
4082 {
4083 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
4084 &error_abort);
4085
4086 qdict_put_obj(props, prop, value);
4087 }
4088
4089 /* Convert CPU model data from X86CPU object to a property dictionary
4090 * that can recreate exactly the same CPU model.
4091 */
4092 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
4093 {
4094 QDict *sprops = x86_cpu_static_props();
4095 const QDictEntry *e;
4096
4097 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
4098 const char *prop = qdict_entry_key(e);
4099 x86_cpu_expand_prop(cpu, props, prop);
4100 }
4101 }
4102
4103 /* Convert CPU model data from X86CPU object to a property dictionary
4104 * that can recreate exactly the same CPU model, including every
4105 * writeable QOM property.
4106 */
4107 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
4108 {
4109 ObjectPropertyIterator iter;
4110 ObjectProperty *prop;
4111
4112 object_property_iter_init(&iter, OBJECT(cpu));
4113 while ((prop = object_property_iter_next(&iter))) {
4114 /* skip read-only or write-only properties */
4115 if (!prop->get || !prop->set) {
4116 continue;
4117 }
4118
4119 /* "hotplugged" is the only property that is configurable
4120 * on the command-line but will be set differently on CPUs
4121 * created using "-cpu ... -smp ..." and by CPUs created
4122 * on the fly by x86_cpu_from_model() for querying. Skip it.
4123 */
4124 if (!strcmp(prop->name, "hotplugged")) {
4125 continue;
4126 }
4127 x86_cpu_expand_prop(cpu, props, prop->name);
4128 }
4129 }
4130
4131 static void object_apply_props(Object *obj, QDict *props, Error **errp)
4132 {
4133 const QDictEntry *prop;
4134 Error *err = NULL;
4135
4136 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
4137 object_property_set_qobject(obj, qdict_entry_value(prop),
4138 qdict_entry_key(prop), &err);
4139 if (err) {
4140 break;
4141 }
4142 }
4143
4144 error_propagate(errp, err);
4145 }
4146
/* Create X86CPU object according to model+props specification
 *
 * On success the caller owns a reference to the returned object; on
 * failure NULL is returned and @errp is set.
 */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        /* Apply user-supplied property overrides on top of the model */
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    /* On any failure, drop the partially-built CPU and report the error */
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}
4181
/* QMP handler: expand a CPU model (+props) into an explicit property list.
 *
 * STATIC expansion reports the x86_cpu_static_props() set relative to the
 * "base" model; FULL expansion reports every writable QOM property on top
 * of the original model name.
 */
CpuModelExpansionInfo *
qmp_query_cpu_model_expansion(CpuModelExpansionType type,
                              CpuModelInfo *model,
                              Error **errp)
{
    X86CPU *xc = NULL;
    Error *err = NULL;
    CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
    QDict *props = NULL;
    const char *base_name;

    /* Build a temporary CPU with the requested model and properties */
    xc = x86_cpu_from_model(model->name,
                            model->has_props ?
                                qobject_to(QDict, model->props) :
                                NULL, &err);
    if (err) {
        goto out;
    }

    props = qdict_new();
    ret->model = g_new0(CpuModelInfo, 1);
    ret->model->props = QOBJECT(props);
    ret->model->has_props = true;

    switch (type) {
    case CPU_MODEL_EXPANSION_TYPE_STATIC:
        /* Static expansion will be based on "base" only */
        base_name = "base";
        x86_cpu_to_dict(xc, props);
        break;
    case CPU_MODEL_EXPANSION_TYPE_FULL:
        /* As we don't return every single property, full expansion needs
         * to keep the original model name+props, and add extra
         * properties on top of that.
         */
        base_name = model->name;
        x86_cpu_to_dict_full(xc, props);
        break;
    default:
        error_setg(&err, "Unsupported expansion type");
        goto out;
    }

    /* NOTE(review): for STATIC expansion this repeats the call already
     * made inside the switch above and looks redundant there — confirm
     * before removing.
     */
    x86_cpu_to_dict(xc, props);

    ret->model->name = g_strdup(base_name);

out:
    object_unref(OBJECT(xc));
    if (err) {
        error_propagate(errp, err);
        qapi_free_CpuModelExpansionInfo(ret);
        ret = NULL;
    }
    return ret;
}
4238 #endif /* !CONFIG_USER_ONLY */
4239
4240 static gchar *x86_gdb_arch_name(CPUState *cs)
4241 {
4242 #ifdef TARGET_X86_64
4243 return g_strdup("i386:x86-64");
4244 #else
4245 return g_strdup("i386");
4246 #endif
4247 }
4248
4249 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
4250 {
4251 X86CPUDefinition *cpudef = data;
4252 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4253
4254 xcc->cpu_def = cpudef;
4255 xcc->migration_safe = true;
4256 }
4257
4258 static void x86_register_cpudef_type(X86CPUDefinition *def)
4259 {
4260 char *typename = x86_cpu_type_name(def->name);
4261 TypeInfo ti = {
4262 .name = typename,
4263 .parent = TYPE_X86_CPU,
4264 .class_init = x86_cpu_cpudef_class_init,
4265 .class_data = def,
4266 };
4267
4268 /* AMD aliases are handled at runtime based on CPUID vendor, so
4269 * they shouldn't be set on the CPU model table.
4270 */
4271 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
4272 /* catch mistakes instead of silently truncating model_id when too long */
4273 assert(def->model_id && strlen(def->model_id) <= 48);
4274
4275
4276 type_register(&ti);
4277 g_free(typename);
4278 }
4279
4280 #if !defined(CONFIG_USER_ONLY)
4281
/* Clear the APIC bit (CPUID[1].EDX) from @env's feature words */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
4286
4287 #endif /* !CONFIG_USER_ONLY */
4288
/* Compute the guest-visible CPUID output for leaf @index, sub-leaf @count.
 *
 * Results are stored in *eax/*ebx/*ecx/*edx.  Out-of-range leaves fall
 * back to the highest basic leaf, matching documented Intel behavior.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t die_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string and maximum basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, APIC ID, and basic feature flags */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE mirrors the guest's CR4.OSXSAVE setting */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                die_offset = apicid_die_offset(env->nr_dies,
                                               cs->nr_cores, cs->nr_threads);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << die_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* MONITOR/MWAIT Leaf */
        *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
        *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
        *ecx = cpu->mwait.ecx; /* flags */
        *edx = cpu->mwait.edx; /* mwait substates */
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors the guest's CR4.PKE setting */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(env->nr_dies,
                                      cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(env->nr_dies,
                                     cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0x1F:
        /* V2 Extended Topology Enumeration Leaf */
        /* Only reported on multi-die configurations */
        if (env->nr_dies < 2) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;
        switch (count) {
        case 0:
            *eax = apicid_core_offset(env->nr_dies, cs->nr_cores,
                                      cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_die_offset(env->nr_dies, cs->nr_cores,
                                     cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        case 2:
            *eax = apicid_pkg_offset(env->nr_dies, cs->nr_cores,
                                     cs->nr_threads);
            *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }
        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            /* Sub-leaf 0: supported components and save-area sizes */
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = xsave_area_size(env->xcr0);
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset for enabled components only */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Maximum extended leaf and vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1; /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        /* SVM leaf, only when SVM is advertised */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        /* AMD cache topology leaf */
        *eax = 0;
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            break;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        /* AMD extended topology (APIC ID decomposition) */
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(cs, cpu,
                                  eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        /* AMD SEV: EAX bit 1 advertises SEV; EBX encodes the C-bit
         * position (bits 5:0) and reduced physical bits (bits 11:6).
         */
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
4771
/* CPUClass::reset()
 *
 * Bring the vCPU to its architectural power-on/RESET state: real mode,
 * execution starting at the reset vector, FPU/SSE/XSAVE units in INIT
 * state, debug and MTRR state cleared.  Fields of CPUX86State located
 * after end_reset_fields (configuration such as CPUID data) survive.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero only the portion of the state that is reset; everything from
     * end_reset_fields onward is preserved across reset. */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 reset value: caching disabled (CD|NW), ET set */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 + EIP 0xfff0 places the first fetch at the
     * reset vector 0xfffffff0. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    /* EDX holds the family/model/stepping signature after reset */
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init: all tag bits set -> registers empty */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
    if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
        env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
    }

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    /* Components 0 and 1 were handled above; enable each remaining XSAVE
     * component whose governing CPUID feature bit is present. */
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    /* No interrupt/exception is in flight after reset */
    env->interrupt_injected = -1;
    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->exception_has_payload = false;
    env->exception_payload = 0;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Application processors start halted, waiting for INIT/SIPI */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}
4907
4908 #ifndef CONFIG_USER_ONLY
4909 bool cpu_is_bsp(X86CPU *cpu)
4910 {
4911 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4912 }
4913
4914 /* TODO: remove me, when reset over QOM tree is implemented */
4915 static void x86_cpu_machine_reset_cb(void *opaque)
4916 {
4917 X86CPU *cpu = opaque;
4918 cpu_reset(CPU(cpu));
4919 }
4920 #endif
4921
4922 static void mce_init(X86CPU *cpu)
4923 {
4924 CPUX86State *cenv = &cpu->env;
4925 unsigned int bank;
4926
4927 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4928 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4929 (CPUID_MCE | CPUID_MCA)) {
4930 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4931 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4932 cenv->mcg_ctl = ~(uint64_t)0;
4933 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4934 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4935 }
4936 }
4937 }
4938
4939 #ifndef CONFIG_USER_ONLY
4940 APICCommonClass *apic_get_class(void)
4941 {
4942 const char *apic_type = "apic";
4943
4944 /* TODO: in-kernel irqchip for hvf */
4945 if (kvm_apic_in_kernel()) {
4946 apic_type = "kvm-apic";
4947 } else if (xen_enabled()) {
4948 apic_type = "xen-apic";
4949 }
4950
4951 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4952 }
4953
/* Instantiate the local APIC device for @cpu and attach it as the CPU's
 * "lapic" child property.  The device is created here but realized later
 * by x86_cpu_apic_realize(). */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* Drop the reference taken by object_new(); the child property added
     * above keeps the APIC alive. */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    /* APIC starts at the architectural default base address, enabled */
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
4971
/* Realize the APIC created by x86_cpu_apic_create(), and map the APIC
 * MMIO window into system memory once for all CPUs.  No-op when the CPU
 * has no APIC. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    /* The MMIO region is shared; map it only for the first CPU realized */
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
4994
/* machine-init-done notifier: if the machine exposes /machine/smram,
 * alias it into this CPU's SMM address space (cpu_as_root) at higher
 * priority than regular memory, so SMRAM shadows normal RAM in SMM. */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        /* Alias covers the low 4GB of the SMRAM region */
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        /* Priority 1: overlays the normal memory alias mapped at priority 0 */
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
5009 #else
/* User-only builds have no APIC device model; nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
5013 #endif
5014
5015 /* Note: Only safe for use on x86(-64) hosts */
5016 static uint32_t x86_host_phys_bits(void)
5017 {
5018 uint32_t eax;
5019 uint32_t host_phys_bits;
5020
5021 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
5022 if (eax >= 0x80000008) {
5023 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
5024 /* Note: According to AMD doc 25481 rev 2.34 they have a field
5025 * at 23:16 that can specify a maximum physical address bits for
5026 * the guest that can override this value; but I've not seen
5027 * anything with that set.
5028 */
5029 host_phys_bits = eax & 0xff;
5030 } else {
5031 /* It's an odd 64 bit machine that doesn't have the leaf for
5032 * physical address bits; fall back to 36 that's most older
5033 * Intel.
5034 */
5035 host_phys_bits = 36;
5036 }
5037
5038 return host_phys_bits;
5039 }
5040
5041 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
5042 {
5043 if (*min < value) {
5044 *min = value;
5045 }
5046 }
5047
5048 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
5049 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
5050 {
5051 CPUX86State *env = &cpu->env;
5052 FeatureWordInfo *fi = &feature_word_info[w];
5053 uint32_t eax = fi->cpuid.eax;
5054 uint32_t region = eax & 0xF0000000;
5055
5056 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
5057 if (!env->features[w]) {
5058 return;
5059 }
5060
5061 switch (region) {
5062 case 0x00000000:
5063 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
5064 break;
5065 case 0x80000000:
5066 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
5067 break;
5068 case 0xC0000000:
5069 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
5070 break;
5071 }
5072 }
5073
5074 /* Calculate XSAVE components based on the configured CPU feature flags */
5075 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
5076 {
5077 CPUX86State *env = &cpu->env;
5078 int i;
5079 uint64_t mask;
5080
5081 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
5082 return;
5083 }
5084
5085 mask = 0;
5086 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
5087 const ExtSaveArea *esa = &x86_ext_save_areas[i];
5088 if (env->features[esa->feature] & esa->bits) {
5089 mask |= (1ULL << i);
5090 }
5091 }
5092
5093 env->features[FEAT_XSAVE_COMP_LO] = mask;
5094 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
5095 }
5096
/***** Steps involved in loading and filtering CPUID data
5098 *
5099 * When initializing and realizing a CPU object, the steps
5100 * involved in setting up CPUID data are:
5101 *
5102 * 1) Loading CPU model definition (X86CPUDefinition). This is
5103 * implemented by x86_cpu_load_def() and should be completely
5104 * transparent, as it is done automatically by instance_init.
5105 * No code should need to look at X86CPUDefinition structs
5106 * outside instance_init.
5107 *
5108 * 2) CPU expansion. This is done by realize before CPUID
5109 * filtering, and will make sure host/accelerator data is
5110 * loaded for CPU models that depend on host capabilities
5111 * (e.g. "host"). Done by x86_cpu_expand_features().
5112 *
5113 * 3) CPUID filtering. This initializes extra data related to
5114 * CPUID, and checks if the host supports all capabilities
5115 * required by the CPU. Runnability of a CPU model is
5116 * determined at this step. Done by x86_cpu_filter_features().
5117 *
5118 * Some operations don't require all steps to be performed.
5119 * More precisely:
5120 *
5121 * - CPU instance creation (instance_init) will run only CPU
5122 * model loading. CPU expansion can't run at instance_init-time
5123 * because host/accelerator data may be not available yet.
5124 * - CPU realization will perform both CPU model expansion and CPUID
5125 * filtering, and return an error in case one of them fails.
5126 * - query-cpu-definitions needs to run all 3 steps. It needs
5127 * to run CPUID filtering, as the 'unavailable-features'
5128 * field is set based on the filtering results.
5129 * - The query-cpu-model-expansion QMP command only needs to run
5130 * CPU model loading and CPU expansion. It should not filter
5131 * any CPUID data based on host capabilities.
5132 */
5133
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 *
 * Step 2 of CPUID setup (see the comment block above): runs at realize
 * time, before x86_cpu_filter_features().  Fills in host-derived feature
 * bits for "-cpu max"-style models, applies +feature/-feature globals,
 * derives the XSAVE component words, and auto-raises the CPUID levels
 * needed by the enabled features.  Errors are reported through @errp.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w] & \
                ~feature_word_info[w].no_autoenable_flags;
        }
    }

    /* Apply "+feature" and "-feature" requests via the QOM bit properties */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features only make sense under KVM with expose_kvm */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);

        /* Intel Processor Trace requires CPUID[0x14] */
        if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
             kvm_enabled() && cpu->intel_pt_auto_level) {
            x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
        }

        /* CPU topology with multi-dies support requires CPUID[0x1F] */
        if (env->nr_dies > 1) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
        }

        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }

        /* SEV requires CPUID[0x8000001F] */
        if (sev_enabled()) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
5236
/*
 * Finishes initialization of CPUID data, filters CPU feature
 * words based on host availability of each feature.
 *
 * For each feature word, bits the host/accelerator cannot provide are
 * recorded in cpu->filtered_features[] and (unless x-force-features is
 * set) cleared from env->features[].  Intel PT gets an extra check of
 * CPUID leaf 0x14 sub-leaves, since its capabilities are not
 * configurable and must match what cpu_x86_cpuid() reports.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        uint32_t available_features = requested_features & host_feat;
        /* x-force-features keeps the requested bits even if unsupported */
        if (!cpu->force_features) {
            env->features[w] = available_features;
        }
        cpu->filtered_features[w] = requested_features & ~available_features;
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
        kvm_enabled()) {
        KVMState *s = CPU(cpu)->kvm_state;
        uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
        uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
        uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
        uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
        uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);

        if (!eax_0 ||
           ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
           ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
           ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
           ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
                                           INTEL_PT_ADDR_RANGES_NUM) ||
           ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
                (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
           (ecx_0 & INTEL_PT_IP_LIP)) {
            /*
             * Processor Trace capabilities aren't configurable, so if the
             * host can't emulate the capabilities we report on
             * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
             */
            env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
            cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
            rv = 1;
        }
    }

    return rv;
}
5294
/* DeviceClass::realize for x86 CPUs.
 *
 * Sequence: validate host-cpuid requirements, expand and filter CPUID
 * features, fix up AMD alias bits and phys-bits, build cache info,
 * create the APIC and (under TCG) the SMM address space, start the
 * vCPU, then reset it and chain to the parent realize.  Errors are
 * reported through @errp via the out: path.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    /* Models like "host" need an accelerator that exposes host CPUID */
    if (xcc->host_cpuid_required) {
        if (!accel_uses_host_cpuid()) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(&local_err, "CPU model '%s' requires KVM", name);
            g_free(name);
            goto out;
        }

        if (enable_cpu_pm) {
            host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
                       &cpu->mwait.ecx, &cpu->mwait.edx);
            env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
        }
    }

    /* mwait extended info: needed for Core compatibility */
    /* We always wake on interrupt even if host does not have the capability */
    cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Filtering must follow expansion; enforce=on makes filtering fatal */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       accel_uses_host_cpuid() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
                /* ... optionally capped by host-phys-bits-limit */
                if (cpu->host_phys_bits_limit &&
                    cpu->phys_bits > cpu->host_phys_bits_limit) {
                    cpu->phys_bits = cpu->host_phys_bits_limit;
                }
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            /* TCG emulates exactly TCG_PHYS_ADDR_BITS physical bits */
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        /* PSE36 allows 36-bit addressing on a 32-bit CPU */
        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        /* legacy-cache=off needs real per-model cache data */
        if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            g_free(name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->cpu_def->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }


    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    MachineState *ms = MACHINE(qdev_get_machine());
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed if the CPU advertises one or SMP is in use */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram.  */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
            warn_report("This family of AMD CPU doesn't support "
                        "hyperthreading(%d)",
                        cs->nr_threads);
            error_printf("Please configure -smp options properly"
                         " or try enabling topoext feature.\n");
            ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
5536
/* DeviceClass::unrealize for x86 CPUs: stop the vCPU, drop the reset
 * callback registered at realize time, detach the APIC, then chain to
 * the parent unrealize. */
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    /* dev is the same pointer realizefn registered as the cb's opaque */
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
5559
/* Opaque state for a boolean QOM property backed by bit(s) of a feature
 * word: the getter reports true only when all bits in @mask are set in
 * env->features[@w]; the setter sets/clears all of them together. */
typedef struct BitProperty {
    FeatureWord w;      /* which feature word the bits live in */
    uint32_t mask;      /* bit(s) within that word */
} BitProperty;
5564
5565 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5566 void *opaque, Error **errp)
5567 {
5568 X86CPU *cpu = X86_CPU(obj);
5569 BitProperty *fp = opaque;
5570 uint32_t f = cpu->env.features[fp->w];
5571 bool value = (f & fp->mask) == fp->mask;
5572 visit_type_bool(v, name, &value, errp);
5573 }
5574
5575 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5576 void *opaque, Error **errp)
5577 {
5578 DeviceState *dev = DEVICE(obj);
5579 X86CPU *cpu = X86_CPU(obj);
5580 BitProperty *fp = opaque;
5581 Error *local_err = NULL;
5582 bool value;
5583
5584 if (dev->realized) {
5585 qdev_prop_set_after_realize(dev, name, errp);
5586 return;
5587 }
5588
5589 visit_type_bool(v, name, &value, &local_err);
5590 if (local_err) {
5591 error_propagate(errp, local_err);
5592 return;
5593 }
5594
5595 if (value) {
5596 cpu->env.features[fp->w] |= fp->mask;
5597 } else {
5598 cpu->env.features[fp->w] &= ~fp->mask;
5599 }
5600 cpu->env.user_features[fp->w] |= fp->mask;
5601 }
5602
5603 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5604 void *opaque)
5605 {
5606 BitProperty *prop = opaque;
5607 g_free(prop);
5608 }
5609
/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      FeatureWord w,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint32_t mask = (1UL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        /* Property already registered: just extend its bit mask.  It must
         * refer to the same feature word. */
        fp = op->opaque;
        assert(fp->w == w);
        fp->mask |= mask;
    } else {
        /* Freed by x86_cpu_release_bit_prop() when the property goes away */
        fp = g_new0(BitProperty, 1);
        fp->w = w;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}
5640
5641 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5642 FeatureWord w,
5643 int bitnr)
5644 {
5645 FeatureWordInfo *fi = &feature_word_info[w];
5646 const char *name = fi->feat_names[bitnr];
5647
5648 if (!name) {
5649 return;
5650 }
5651
5652 /* Property names should use "-" instead of "_".
5653 * Old names containing underscores are registered as aliases
5654 * using object_property_add_alias()
5655 */
5656 assert(!strchr(name, '_'));
5657 /* aliases don't use "|" delimiters anymore, they are registered
5658 * manually using object_property_add_alias() */
5659 assert(!strchr(name, '|'));
5660 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5661 }
5662
/* Build a GuestPanicInformation from the Hyper-V crash MSRs, or return
 * NULL when the guest-crash MSRs are not exposed.  The caller owns the
 * result and frees it with qapi_free_GuestPanicInformation(). */
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        /* The five crash-parameter MSRs map directly to arg1..arg5 */
        assert(HV_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
        panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}
5684 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5685 const char *name, void *opaque,
5686 Error **errp)
5687 {
5688 CPUState *cs = CPU(obj);
5689 GuestPanicInformation *panic_info;
5690
5691 if (!cs->crash_occurred) {
5692 error_setg(errp, "No crash occured");
5693 return;
5694 }
5695
5696 panic_info = x86_cpu_get_crash_info(cs);
5697 if (panic_info == NULL) {
5698 error_setg(errp, "No crash information");
5699 return;
5700 }
5701
5702 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5703 errp);
5704 qapi_free_GuestPanicInformation(panic_info);
5705 }
5706
5707 static void x86_cpu_initfn(Object *obj)
5708 {
5709 X86CPU *cpu = X86_CPU(obj);
5710 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
5711 CPUX86State *env = &cpu->env;
5712 FeatureWord w;
5713
5714 env->nr_dies = 1;
5715 cpu_set_cpustate_pointers(cpu);
5716
5717 object_property_add(obj, "family", "int",
5718 x86_cpuid_version_get_family,
5719 x86_cpuid_version_set_family, NULL, NULL, NULL);
5720 object_property_add(obj, "model", "int",
5721 x86_cpuid_version_get_model,
5722 x86_cpuid_version_set_model, NULL, NULL, NULL);
5723 object_property_add(obj, "stepping", "int",
5724 x86_cpuid_version_get_stepping,
5725 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
5726 object_property_add_str(obj, "vendor",
5727 x86_cpuid_get_vendor,
5728 x86_cpuid_set_vendor, NULL);
5729 object_property_add_str(obj, "model-id",
5730 x86_cpuid_get_model_id,
5731 x86_cpuid_set_model_id, NULL);
5732 object_property_add(obj, "tsc-frequency", "int",
5733 x86_cpuid_get_tsc_freq,
5734 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
5735 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
5736 x86_cpu_get_feature_words,
5737 NULL, NULL, (void *)env->features, NULL);
5738 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
5739 x86_cpu_get_feature_words,
5740 NULL, NULL, (void *)cpu->filtered_features, NULL);
5741 /*
5742 * The "unavailable-features" property has the same semantics as
5743 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
5744 * QMP command: they list the features that would have prevented the
5745 * CPU from running if the "enforce" flag was set.
5746 */
5747 object_property_add(obj, "unavailable-features", "strList",
5748 x86_cpu_get_unavailable_features,
5749 NULL, NULL, NULL, &error_abort);
5750
5751 object_property_add(obj, "crash-information", "GuestPanicInformation",
5752 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
5753
5754 for (w = 0; w < FEATURE_WORDS; w++) {
5755 int bitnr;
5756
5757 for (bitnr = 0; bitnr < 32; bitnr++) {
5758 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
5759 }
5760 }
5761
5762 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
5763 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
5764 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
5765 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
5766 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
5767 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
5768 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
5769
5770 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
5771 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
5772 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
5773 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
5774 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
5775 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
5776 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
5777 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
5778 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
5779 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
5780 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
5781 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
5782 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
5783 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
5784 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
5785 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
5786 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
5787 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
5788 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
5789 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
5790 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
5791
5792 if (xcc->cpu_def) {
5793 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
5794 }
5795 }
5796
5797 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5798 {
5799 X86CPU *cpu = X86_CPU(cs);
5800
5801 return cpu->apic_id;
5802 }
5803
5804 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5805 {
5806 X86CPU *cpu = X86_CPU(cs);
5807
5808 return cpu->env.cr[0] & CR0_PG_MASK;
5809 }
5810
5811 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5812 {
5813 X86CPU *cpu = X86_CPU(cs);
5814
5815 cpu->env.eip = value;
5816 }
5817
5818 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5819 {
5820 X86CPU *cpu = X86_CPU(cs);
5821
5822 cpu->env.eip = tb->pc - tb->cs_base;
5823 }
5824
/*
 * Return the highest-priority interrupt from @interrupt_request that is
 * deliverable in the CPU's current state, or 0 if none is.
 *
 * POLL and SIPI are reported unconditionally; every other source is
 * gated on HF2_GIF (presumably the SVM global interrupt flag — cleared
 * while a VMEXIT-relevant window is closed) plus its own mask bits.
 */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    if (env->hflags2 & HF2_GIF_MASK) {
        /* SMI is blocked while the CPU is already in SMM. */
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            /* NMI is blocked while a previous NMI is being handled. */
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            /*
             * External IRQ: under virtual-interrupt masking (VINTR) the
             * HIF bit decides; otherwise EFLAGS.IF must be set and no
             * interrupt-shadow (e.g. after MOV SS/STI) may be pending.
             */
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            /* Virtual IRQ, same IF/shadow gating as a real one. */
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}
5866
5867 static bool x86_cpu_has_work(CPUState *cs)
5868 {
5869 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
5870 }
5871
5872 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5873 {
5874 X86CPU *cpu = X86_CPU(cs);
5875 CPUX86State *env = &cpu->env;
5876
5877 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5878 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5879 : bfd_mach_i386_i8086);
5880 info->print_insn = print_insn_i386;
5881
5882 info->cap_arch = CS_ARCH_X86;
5883 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5884 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5885 : CS_MODE_16);
5886 info->cap_insn_unit = 1;
5887 info->cap_insn_split = 8;
5888 }
5889
/*
 * Recompute the derived hflags bits from the architectural state
 * (segment registers, CR0, CR4, EFER, EFLAGS).  Bits covered by
 * HFLAG_COPY_MASK are preserved from the old value; everything else is
 * rebuilt from scratch.
 */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    /* Keep only the bits that are not recomputed below. */
    hflags = env->hflags & HFLAG_COPY_MASK;
    /* CPL is taken from the DPL field of SS. */
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    /* CR0.MP/EM/TS map onto HF_MP/EM/TS with a single shift. */
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    /* TF/VM/IOPL are mirrored directly from EFLAGS. */
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        /* Long mode with a 64-bit (L-bit) code segment. */
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        /* Legacy/compat mode: CS32/SS32 come from the segment D/B bits. */
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            /* Real mode, vm86 mode, or 16-bit CS: segment bases always
             * participate in address generation. */
            hflags |= HF_ADDSEG_MASK;
        } else {
            /* Otherwise bases matter only if one of DS/ES/SS is non-zero. */
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}
5931
/* qdev properties common to every x86 CPU object. */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* For system emulation the topology IDs start out unassigned. */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),

    /* Hyper-V enlightenments, all disabled by default. */
    DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
                       HYPERV_SPINLOCK_NEVER_RETRY),
    DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
                      HYPERV_FEAT_RELAXED, 0),
    DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
                      HYPERV_FEAT_VAPIC, 0),
    DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
                      HYPERV_FEAT_TIME, 0),
    DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
                      HYPERV_FEAT_CRASH, 0),
    DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
                      HYPERV_FEAT_RESET, 0),
    DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
                      HYPERV_FEAT_VPINDEX, 0),
    DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
                      HYPERV_FEAT_RUNTIME, 0),
    DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
                      HYPERV_FEAT_SYNIC, 0),
    DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER, 0),
    DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
                      HYPERV_FEAT_FREQUENCIES, 0),
    DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
                      HYPERV_FEAT_REENLIGHTENMENT, 0),
    DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
                      HYPERV_FEAT_TLBFLUSH, 0),
    DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
                      HYPERV_FEAT_EVMCS, 0),
    DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
                      HYPERV_FEAT_IPI, 0),
    DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
                      HYPERV_FEAT_STIMER_DIRECT, 0),
    DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),

    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
                     true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
                     true),
    DEFINE_PROP_END_OF_LIST()
};
6034
/*
 * Class init for the abstract TYPE_X86_CPU base type: install the
 * CPUClass/DeviceClass hooks shared by every x86 CPU model.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain realize/unrealize so the parent implementations still run. */
    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    /* Save the inherited reset handler before overriding it. */
    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    /* Interrupt delivery hooks are only meaningful under TCG. */
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifndef CONFIG_USER_ONLY
    /* System-emulation-only hooks: memory mapping, elf notes, migration. */
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 66;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 50;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
    cc->tlb_fill = x86_cpu_tlb_fill;
#endif
    cc->disas_set_info = x86_disas_set_info;

    /* x86 CPUs can be created with -device / device_add. */
    dc->user_creatable = true;
}
6097
/*
 * QOM registration of the abstract x86 CPU base type.  Concrete CPU
 * models (registered from x86_cpu_register_types()) derive from this.
 */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true, /* never instantiated directly */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
6107
6108
6109 /* "base" CPU model, used by query-cpu-model-expansion */
6110 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
6111 {
6112 X86CPUClass *xcc = X86_CPU_CLASS(oc);
6113
6114 xcc->static_model = true;
6115 xcc->migration_safe = true;
6116 xcc->model_description = "base CPU model type with no features enabled";
6117 xcc->ordering = 8;
6118 }
6119
/* QOM registration of the "base" CPU model type. */
static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};
6125
6126 static void x86_cpu_register_types(void)
6127 {
6128 int i;
6129
6130 type_register_static(&x86_cpu_type_info);
6131 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
6132 x86_register_cpudef_type(&builtin_x86_defs[i]);
6133 }
6134 type_register_static(&max_x86_cpu_type_info);
6135 type_register_static(&x86_base_cpu_type_info);
6136 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
6137 type_register_static(&host_x86_cpu_type_info);
6138 #endif
6139 }
6140
6141 type_init(x86_cpu_register_types)