]> git.proxmox.com Git - mirror_qemu.git/blob - target/i386/cpu.c
a4bc04ed1dc33309a0af9b9ceaafe190647bee2b
[mirror_qemu.git] / target / i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24 #include "qemu/qemu-print.h"
25
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/hvf.h"
30 #include "sysemu/cpus.h"
31 #include "kvm_i386.h"
32 #include "sev_i386.h"
33
34 #include "qemu/error-report.h"
35 #include "qemu/option.h"
36 #include "qemu/config-file.h"
37 #include "qapi/error.h"
38 #include "qapi/qapi-visit-misc.h"
39 #include "qapi/qapi-visit-run-state.h"
40 #include "qapi/qmp/qdict.h"
41 #include "qapi/qmp/qerror.h"
42 #include "qapi/visitor.h"
43 #include "qom/qom-qobject.h"
44 #include "sysemu/arch_init.h"
45 #include "qapi/qapi-commands-target.h"
46
47 #include "standard-headers/asm-x86/kvm_para.h"
48
49 #include "sysemu/sysemu.h"
50 #include "sysemu/tcg.h"
51 #include "hw/qdev-properties.h"
52 #include "hw/i386/topology.h"
53 #ifndef CONFIG_USER_ONLY
54 #include "exec/address-spaces.h"
55 #include "hw/hw.h"
56 #include "hw/xen/xen.h"
57 #include "hw/i386/apic_internal.h"
58 #endif
59
60 #include "disas/capstone.h"
61
/* Helpers for building CPUID[2] descriptors: */

/* Geometry of one cache level described by a single CPUID[2] descriptor byte */
struct CPUID2CacheDescriptorInfo {
    enum CacheType type;    /* data / instruction / unified */
    int level;              /* cache level: 1, 2 or 3 */
    int size;               /* total cache size in bytes */
    int line_size;          /* cache line size in bytes */
    int associativity;      /* ways of associativity */
};

/*
 * Known CPUID 2 cache descriptors, indexed by descriptor byte value.
 * From Intel SDM Volume 2A, CPUID instruction
 */
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
    [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size =   8 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size =  16 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size =  32 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x0A] = { .level = 1, .type = DATA_CACHE,        .size =   8 * KiB,
               .associativity = 2,  .line_size = 32, },
    [0x0C] = { .level = 1, .type = DATA_CACHE,        .size =  16 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x0D] = { .level = 1, .type = DATA_CACHE,        .size =  16 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x0E] = { .level = 1, .type = DATA_CACHE,        .size =  24 * KiB,
               .associativity = 6,  .line_size = 64, },
    [0x1D] = { .level = 2, .type = UNIFIED_CACHE,     .size = 128 * KiB,
               .associativity = 2,  .line_size = 64, },
    [0x21] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 8,  .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x22, 0x23 are not included
     */
    [0x24] = { .level = 2, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 16, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x25, 0x20 are not included
     */
    [0x2C] = { .level = 1, .type = DATA_CACHE,        .size =  32 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size =  32 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x41] = { .level = 2, .type = UNIFIED_CACHE,     .size = 128 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x42] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x43] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x44] = { .level = 2, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 4,  .line_size = 32, },
    [0x45] = { .level = 2, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 4,  .line_size = 32, },
    [0x46] = { .level = 3, .type = UNIFIED_CACHE,     .size =   4 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0x47] = { .level = 3, .type = UNIFIED_CACHE,     .size =   8 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0x48] = { .level = 2, .type = UNIFIED_CACHE,     .size =   3 * MiB,
               .associativity = 12, .line_size = 64, },
    /* Descriptor 0x49 depends on CPU family/model, so it is not included */
    [0x4A] = { .level = 3, .type = UNIFIED_CACHE,     .size =   6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4B] = { .level = 3, .type = UNIFIED_CACHE,     .size =   8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4C] = { .level = 3, .type = UNIFIED_CACHE,     .size =  12 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4D] = { .level = 3, .type = UNIFIED_CACHE,     .size =  16 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4E] = { .level = 2, .type = UNIFIED_CACHE,     .size =   6 * MiB,
               .associativity = 24, .line_size = 64, },
    [0x60] = { .level = 1, .type = DATA_CACHE,        .size =  16 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x66] = { .level = 1, .type = DATA_CACHE,        .size =   8 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x67] = { .level = 1, .type = DATA_CACHE,        .size =  16 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x68] = { .level = 1, .type = DATA_CACHE,        .size =  32 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x78] = { .level = 2, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 4,  .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
     */
    [0x7D] = { .level = 2, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0x7F] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 2,  .line_size = 64, },
    [0x80] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x82] = { .level = 2, .type = UNIFIED_CACHE,     .size = 256 * KiB,
               .associativity = 8,  .line_size = 32, },
    [0x83] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 8,  .line_size = 32, },
    [0x84] = { .level = 2, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 8,  .line_size = 32, },
    [0x85] = { .level = 2, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 8,  .line_size = 32, },
    [0x86] = { .level = 2, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x87] = { .level = 2, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD0] = { .level = 3, .type = UNIFIED_CACHE,     .size = 512 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0xD1] = { .level = 3, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0xD2] = { .level = 3, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0xD6] = { .level = 3, .type = UNIFIED_CACHE,     .size =   1 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD7] = { .level = 3, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD8] = { .level = 3, .type = UNIFIED_CACHE,     .size =   4 * MiB,
               .associativity = 8,  .line_size = 64, },
    /* Note: 1.5 * MiB is an exact integer (1572864), no precision loss */
    [0xDC] = { .level = 3, .type = UNIFIED_CACHE,     .size = 1.5 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDD] = { .level = 3, .type = UNIFIED_CACHE,     .size =   3 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDE] = { .level = 3, .type = UNIFIED_CACHE,     .size =   6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xE2] = { .level = 3, .type = UNIFIED_CACHE,     .size =   2 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE3] = { .level = 3, .type = UNIFIED_CACHE,     .size =   4 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE4] = { .level = 3, .type = UNIFIED_CACHE,     .size =   8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xEA] = { .level = 3, .type = UNIFIED_CACHE,     .size =  12 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEB] = { .level = 3, .type = UNIFIED_CACHE,     .size =  18 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEC] = { .level = 3, .type = UNIFIED_CACHE,     .size =  24 * MiB,
               .associativity = 24, .line_size = 64, },
};
196
197 /*
198 * "CPUID leaf 2 does not report cache descriptor information,
199 * use CPUID leaf 4 to query cache parameters"
200 */
201 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
202
203 /*
204 * Return a CPUID 2 cache descriptor for a given cache.
205 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
206 */
207 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
208 {
209 int i;
210
211 assert(cache->size > 0);
212 assert(cache->level > 0);
213 assert(cache->line_size > 0);
214 assert(cache->associativity > 0);
215 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
216 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
217 if (d->level == cache->level && d->type == cache->type &&
218 d->size == cache->size && d->line_size == cache->line_size &&
219 d->associativity == cache->associativity) {
220 return i;
221 }
222 }
223
224 return CACHE_DESCRIPTOR_UNAVAILABLE;
225 }
226
/* CPUID Leaf 4 constants: */

/* EAX: */
#define CACHE_TYPE_D    1
#define CACHE_TYPE_I    2
#define CACHE_TYPE_UNIFIED   3

/*
 * Cache level goes in EAX bits 7:5.  The argument is parenthesized so the
 * macro stays correct for compound expressions like CACHE_LEVEL(a | b).
 */
#define CACHE_LEVEL(l)        ((l) << 5)

#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
                       ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)
249
/*
 * Encode cache info for CPUID[4].
 * @num_apic_ids: number of APIC IDs sharing this cache (EAX bits 25:14)
 * @num_cores: number of processor cores on the package (EAX bits 31:26)
 */
static void encode_cache_cpuid4(CPUCacheInfo *cache,
                                int num_apic_ids, int num_cores,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
{
    /* The cache geometry fields must be mutually consistent */
    assert(cache->size == cache->line_size * cache->associativity *
           cache->partitions * cache->sets);

    assert(num_apic_ids > 0);
    /* EAX: type, level, self-init flag, plus the two sharing counts - 1 */
    *eax = CACHE_TYPE(cache->type) |
           CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
           ((num_cores - 1) << 26) |
           ((num_apic_ids - 1) << 14);

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    /* EBX: line size (11:0), partitions (21:12), ways (31:22) — all minus 1 */
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    /* ECX: number of sets minus one */
    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    /* EDX: WBINVD sharing (bit 0), inclusiveness (1), complex indexing (2) */
    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
282
283 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
284 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
285 {
286 assert(cache->size % 1024 == 0);
287 assert(cache->lines_per_tag > 0);
288 assert(cache->associativity > 0);
289 assert(cache->line_size > 0);
290 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
291 (cache->lines_per_tag << 8) | (cache->line_size);
292 }
293
#define ASSOC_FULL 0xFF

/*
 * AMD associativity encoding used on CPUID Leaf 0x80000006.
 * The argument is parenthesized so compound expressions (e.g. a bitwise-or
 * or a ternary) are compared as a whole rather than misparsed: relational
 * and equality operators bind tighter than | and ?:.
 */
#define AMD_ENC_ASSOC(a) ((a) <=    1 ? (a)  : \
                          (a) ==    2 ? 0x2 : \
                          (a) ==    4 ? 0x4 : \
                          (a) ==    8 ? 0x6 : \
                          (a) ==   16 ? 0x8 : \
                          (a) ==   32 ? 0xA : \
                          (a) ==   48 ? 0xB : \
                          (a) ==   64 ? 0xC : \
                          (a) ==   96 ? 0xD : \
                          (a) ==  128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
309
310 /*
311 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
312 * @l3 can be NULL.
313 */
314 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
315 CPUCacheInfo *l3,
316 uint32_t *ecx, uint32_t *edx)
317 {
318 assert(l2->size % 1024 == 0);
319 assert(l2->associativity > 0);
320 assert(l2->lines_per_tag > 0);
321 assert(l2->line_size > 0);
322 *ecx = ((l2->size / 1024) << 16) |
323 (AMD_ENC_ASSOC(l2->associativity) << 12) |
324 (l2->lines_per_tag << 8) | (l2->line_size);
325
326 if (l3) {
327 assert(l3->size % (512 * 1024) == 0);
328 assert(l3->associativity > 0);
329 assert(l3->lines_per_tag > 0);
330 assert(l3->line_size > 0);
331 *edx = ((l3->size / (512 * 1024)) << 18) |
332 (AMD_ENC_ASSOC(l3->associativity) << 12) |
333 (l3->lines_per_tag << 8) | (l3->line_size);
334 } else {
335 *edx = 0;
336 }
337 }
338
/*
 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
 * Define the constants to build the cpu topology. Right now, TOPOEXT
 * feature is enabled only on EPYC. So, these constants are based on
 * EPYC supported configurations. We may need to handle the cases if
 * these values change in future.
 */
/* Maximum core complexes in a node */
#define MAX_CCX 2
/* Maximum cores in a core complex */
#define MAX_CORES_IN_CCX 4
/* Maximum cores in a node (MAX_CCX * MAX_CORES_IN_CCX) */
#define MAX_CORES_IN_NODE 8
/* Maximum nodes in a socket */
#define MAX_NODES_PER_SOCKET 4
355
356 /*
357 * Figure out the number of nodes required to build this config.
358 * Max cores in a node is 8
359 */
360 static int nodes_in_socket(int nr_cores)
361 {
362 int nodes;
363
364 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
365
366 /* Hardware does not support config with 3 nodes, return 4 in that case */
367 return (nodes == 3) ? 4 : nodes;
368 }
369
370 /*
371 * Decide the number of cores in a core complex with the given nr_cores using
372 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
373 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible
374 * L3 cache is shared across all cores in a core complex. So, this will also
375 * tell us how many cores are sharing the L3 cache.
376 */
377 static int cores_in_core_complex(int nr_cores)
378 {
379 int nodes;
380
381 /* Check if we can fit all the cores in one core complex */
382 if (nr_cores <= MAX_CORES_IN_CCX) {
383 return nr_cores;
384 }
385 /* Get the number of nodes required to build this config */
386 nodes = nodes_in_socket(nr_cores);
387
388 /*
389 * Divide the cores accros all the core complexes
390 * Return rounded up value
391 */
392 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
393 }
394
/* Encode cache info for CPUID[8000001D] (AMD cache topology leaf) */
static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
                                       uint32_t *eax, uint32_t *ebx,
                                       uint32_t *ecx, uint32_t *edx)
{
    uint32_t l3_cores;
    /* The cache geometry fields must be mutually consistent */
    assert(cache->size == cache->line_size * cache->associativity *
           cache->partitions * cache->sets);

    *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
               (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);

    /*
     * EAX bits 25:14: number of logical processors sharing this cache - 1.
     * L3 is shared among multiple cores (all cores in a core complex);
     * other levels are shared only among the threads of a single core.
     */
    if (cache->level == 3) {
        l3_cores = cores_in_core_complex(cs->nr_cores);
        *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
    } else {
        *eax |= ((cs->nr_threads - 1) << 14);
    }

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    /* EBX: line size (11:0), partitions (21:12), ways (31:22) — all minus 1 */
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    /* ECX: number of sets minus one */
    assert(cache->sets > 0);
    *ecx = cache->sets - 1;

    /* EDX: WBINVD sharing (bit 0), inclusiveness (1), complex indexing (2) */
    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
431
/* Data structure to hold the configuration info for a given core index */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology
     * This can be 0,1,2,3 with max 4 cores in a core complex
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};
446
447 /*
448 * Build the configuration closely match the EPYC hardware. Using the EPYC
449 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
450 * right now. This could change in future.
451 * nr_cores : Total number of cores in the config
452 * core_id : Core index of the current CPU
453 * topo : Data structure to hold all the config info for this core index
454 */
455 static void build_core_topology(int nr_cores, int core_id,
456 struct core_topology *topo)
457 {
458 int nodes, cores_in_ccx;
459
460 /* First get the number of nodes required */
461 nodes = nodes_in_socket(nr_cores);
462
463 cores_in_ccx = cores_in_core_complex(nr_cores);
464
465 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
466 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
467 topo->core_id = core_id % cores_in_ccx;
468 topo->num_nodes = nodes;
469 }
470
/* Encode CPU topology info for CPUID[8000001E] (AMD extended topology) */
static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
{
    struct core_topology topo = {0};
    unsigned long nodes;
    int shift;

    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
    /* EAX: extended APIC ID of this CPU */
    *eax = cpu->apic_id;
    /*
     * CPUID_Fn8000001E_EBX
     * 31:16 Reserved
     * 15:8  Threads per core (The number of threads per core is
     *        Threads per core + 1)
     *  7:0  Core id (see bit decoding below)
     *       SMT:
     *           4:3 node id
     *             2 Core complex id
     *           1:0 Core id
     *       Non SMT:
     *           5:4 node id
     *             3 Core complex id
     *           1:0 Core id
     */
    if (cs->nr_threads - 1) {
        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
                (topo.ccx_id << 2) | topo.core_id;
    } else {
        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
    }
    /*
     * CPUID_Fn8000001E_ECX
     * 31:11 Reserved
     * 10:8  Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0  Node id (see bit decoding below)
     *         2  Socket id
     *       1:0  Node id
     */
    if (topo.num_nodes <= 4) {
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
               topo.node_id;
    } else {
        /*
         * Node id fix up. Actual hardware supports up to 4 nodes. But with
         * more than 32 cores, we may end up with more than 4 nodes.
         * Node id is a combination of socket id and node id. Only requirement
         * here is that this number should be unique across the system.
         * Shift the socket id to accommodate more nodes. We don't expect both
         * socket id and node id to be big numbers at the same time. This is
         * not an ideal config but we need to support it. Max nodes we can
         * have is 32 (255/8) with 8 cores per node and 255 max cores. We only
         * need 5 bits for nodes. Find the left most set bit to represent the
         * total number of nodes. find_last_bit returns last set bit(0 based).
         * Left shift(+1) the socket id to represent all the nodes.
         */
        nodes = topo.num_nodes - 1;
        shift = find_last_bit(&nodes, 8);
        *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
               topo.node_id;
    }
    *edx = 0;
}
535
/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DATA_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = INSTRUCTION_CACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*
 * FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4
 * NOTE(review): no .sets/.partitions here — presumably only consumed by the
 * CPUID leaf 2 encoding path, which doesn't need them; verify against users.
 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};


/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,
    .partitions = 1,
};

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};
645
/* TLB definitions: hardcoded values reported via CPUID[0x80000005/6] */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
667
/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
/*
 * Generated packets which contain IP payloads have LIP values.
 * Use an unsigned shift: (1 << 31) left-shifts into the sign bit of a
 * signed int, which is undefined behavior in C.
 */
#define INTEL_PT_IP_LIP          (1u << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
696
697 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
698 uint32_t vendor2, uint32_t vendor3)
699 {
700 int i;
701 for (i = 0; i < 4; i++) {
702 dst[i] = vendor1 >> (8 * i);
703 dst[i + 4] = vendor2 >> (8 * i);
704 dst[i + 8] = vendor3 >> (8 * i);
705 }
706 dst[CPUID_VENDOR_SZ] = '\0';
707 }
708
/* Feature bitmasks for the built-in CPU models and for TCG emulation: */

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* TCG_*_FEATURES: feature bits the TCG emulator can actually provide */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
          CPUID_EXT_RDRAND)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C */

/* 64-bit-only TCG features, empty when building a 32-bit-only target */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES CPUID_SVM_NPT
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
          /* CPUID_7_0_ECX_OSPKE is dynamic */ \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
776
/* How a feature word is enumerated: via a CPUID leaf or via an MSR */
typedef enum FeatureWordType {
   CPUID_FEATURE_WORD,
   MSR_FEATURE_WORD,
} FeatureWordType;
781
/* Metadata describing one 32-bit feature word (names, source, TCG support) */
typedef struct FeatureWordInfo {
    FeatureWordType type;
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    union {
        /* If type==CPUID_FEATURE_WORD */
        struct {
            uint32_t eax;   /* Input EAX for CPUID */
            bool needs_ecx; /* CPUID instruction uses ECX as input */
            uint32_t ecx;   /* Input ECX value for CPUID */
            int reg;        /* output register (R_* constant) */
        } cpuid;
        /* If type==MSR_FEATURE_WORD */
        struct {
            uint32_t index;
            struct {   /*CPUID that enumerate this MSR*/
                FeatureWord cpuid_class;
                uint32_t    cpuid_flag;
            } cpuid_dep;
        } msr;
    };
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
} FeatureWordInfo;
813
814 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
815 [FEAT_1_EDX] = {
816 .type = CPUID_FEATURE_WORD,
817 .feat_names = {
818 "fpu", "vme", "de", "pse",
819 "tsc", "msr", "pae", "mce",
820 "cx8", "apic", NULL, "sep",
821 "mtrr", "pge", "mca", "cmov",
822 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
823 NULL, "ds" /* Intel dts */, "acpi", "mmx",
824 "fxsr", "sse", "sse2", "ss",
825 "ht" /* Intel htt */, "tm", "ia64", "pbe",
826 },
827 .cpuid = {.eax = 1, .reg = R_EDX, },
828 .tcg_features = TCG_FEATURES,
829 },
830 [FEAT_1_ECX] = {
831 .type = CPUID_FEATURE_WORD,
832 .feat_names = {
833 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
834 "ds-cpl", "vmx", "smx", "est",
835 "tm2", "ssse3", "cid", NULL,
836 "fma", "cx16", "xtpr", "pdcm",
837 NULL, "pcid", "dca", "sse4.1",
838 "sse4.2", "x2apic", "movbe", "popcnt",
839 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
840 "avx", "f16c", "rdrand", "hypervisor",
841 },
842 .cpuid = { .eax = 1, .reg = R_ECX, },
843 .tcg_features = TCG_EXT_FEATURES,
844 },
845 /* Feature names that are already defined on feature_name[] but
846 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
847 * names on feat_names below. They are copied automatically
848 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
849 */
850 [FEAT_8000_0001_EDX] = {
851 .type = CPUID_FEATURE_WORD,
852 .feat_names = {
853 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
854 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
855 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
856 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
857 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
858 "nx", NULL, "mmxext", NULL /* mmx */,
859 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
860 NULL, "lm", "3dnowext", "3dnow",
861 },
862 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
863 .tcg_features = TCG_EXT2_FEATURES,
864 },
865 [FEAT_8000_0001_ECX] = {
866 .type = CPUID_FEATURE_WORD,
867 .feat_names = {
868 "lahf-lm", "cmp-legacy", "svm", "extapic",
869 "cr8legacy", "abm", "sse4a", "misalignsse",
870 "3dnowprefetch", "osvw", "ibs", "xop",
871 "skinit", "wdt", NULL, "lwp",
872 "fma4", "tce", NULL, "nodeid-msr",
873 NULL, "tbm", "topoext", "perfctr-core",
874 "perfctr-nb", NULL, NULL, NULL,
875 NULL, NULL, NULL, NULL,
876 },
877 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
878 .tcg_features = TCG_EXT3_FEATURES,
879 /*
880 * TOPOEXT is always allowed but can't be enabled blindly by
881 * "-cpu host", as it requires consistent cache topology info
882 * to be provided so it doesn't confuse guests.
883 */
884 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
885 },
886 [FEAT_C000_0001_EDX] = {
887 .type = CPUID_FEATURE_WORD,
888 .feat_names = {
889 NULL, NULL, "xstore", "xstore-en",
890 NULL, NULL, "xcrypt", "xcrypt-en",
891 "ace2", "ace2-en", "phe", "phe-en",
892 "pmm", "pmm-en", NULL, NULL,
893 NULL, NULL, NULL, NULL,
894 NULL, NULL, NULL, NULL,
895 NULL, NULL, NULL, NULL,
896 NULL, NULL, NULL, NULL,
897 },
898 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
899 .tcg_features = TCG_EXT4_FEATURES,
900 },
901 [FEAT_KVM] = {
902 .type = CPUID_FEATURE_WORD,
903 .feat_names = {
904 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
905 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
906 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
907 NULL, NULL, NULL, NULL,
908 NULL, NULL, NULL, NULL,
909 NULL, NULL, NULL, NULL,
910 "kvmclock-stable-bit", NULL, NULL, NULL,
911 NULL, NULL, NULL, NULL,
912 },
913 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
914 .tcg_features = TCG_KVM_FEATURES,
915 },
916 [FEAT_KVM_HINTS] = {
917 .type = CPUID_FEATURE_WORD,
918 .feat_names = {
919 "kvm-hint-dedicated", NULL, NULL, NULL,
920 NULL, NULL, NULL, NULL,
921 NULL, NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 NULL, NULL, NULL, NULL,
924 NULL, NULL, NULL, NULL,
925 NULL, NULL, NULL, NULL,
926 NULL, NULL, NULL, NULL,
927 },
928 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
929 .tcg_features = TCG_KVM_FEATURES,
930 /*
931 * KVM hints aren't auto-enabled by -cpu host, they need to be
932 * explicitly enabled in the command-line.
933 */
934 .no_autoenable_flags = ~0U,
935 },
936 /*
937 * .feat_names are commented out for Hyper-V enlightenments because we
938 * don't want to have two different ways for enabling them on QEMU command
939 * line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...) require
940 * enabling several feature bits simultaneously, exposing these bits
941 * individually may just confuse guests.
942 */
943 [FEAT_HYPERV_EAX] = {
944 .type = CPUID_FEATURE_WORD,
945 .feat_names = {
946 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
947 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
948 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
949 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
950 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
951 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
952 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
953 NULL, NULL,
954 NULL, NULL, NULL, NULL,
955 NULL, NULL, NULL, NULL,
956 NULL, NULL, NULL, NULL,
957 NULL, NULL, NULL, NULL,
958 },
959 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
960 },
961 [FEAT_HYPERV_EBX] = {
962 .type = CPUID_FEATURE_WORD,
963 .feat_names = {
964 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
965 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
966 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
967 NULL /* hv_create_port */, NULL /* hv_connect_port */,
968 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
969 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
970 NULL, NULL,
971 NULL, NULL, NULL, NULL,
972 NULL, NULL, NULL, NULL,
973 NULL, NULL, NULL, NULL,
974 NULL, NULL, NULL, NULL,
975 },
976 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
977 },
978 [FEAT_HYPERV_EDX] = {
979 .type = CPUID_FEATURE_WORD,
980 .feat_names = {
981 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
982 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
983 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
984 NULL, NULL,
985 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
986 NULL, NULL, NULL, NULL,
987 NULL, NULL, NULL, NULL,
988 NULL, NULL, NULL, NULL,
989 NULL, NULL, NULL, NULL,
990 NULL, NULL, NULL, NULL,
991 },
992 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
993 },
994 [FEAT_HV_RECOMM_EAX] = {
995 .type = CPUID_FEATURE_WORD,
996 .feat_names = {
997 NULL /* hv_recommend_pv_as_switch */,
998 NULL /* hv_recommend_pv_tlbflush_local */,
999 NULL /* hv_recommend_pv_tlbflush_remote */,
1000 NULL /* hv_recommend_msr_apic_access */,
1001 NULL /* hv_recommend_msr_reset */,
1002 NULL /* hv_recommend_relaxed_timing */,
1003 NULL /* hv_recommend_dma_remapping */,
1004 NULL /* hv_recommend_int_remapping */,
1005 NULL /* hv_recommend_x2apic_msrs */,
1006 NULL /* hv_recommend_autoeoi_deprecation */,
1007 NULL /* hv_recommend_pv_ipi */,
1008 NULL /* hv_recommend_ex_hypercalls */,
1009 NULL /* hv_hypervisor_is_nested */,
1010 NULL /* hv_recommend_int_mbec */,
1011 NULL /* hv_recommend_evmcs */,
1012 NULL,
1013 NULL, NULL, NULL, NULL,
1014 NULL, NULL, NULL, NULL,
1015 NULL, NULL, NULL, NULL,
1016 NULL, NULL, NULL, NULL,
1017 },
1018 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
1019 },
1020 [FEAT_HV_NESTED_EAX] = {
1021 .type = CPUID_FEATURE_WORD,
1022 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
1023 },
1024 [FEAT_SVM] = {
1025 .type = CPUID_FEATURE_WORD,
1026 .feat_names = {
1027 "npt", "lbrv", "svm-lock", "nrip-save",
1028 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1029 NULL, NULL, "pause-filter", NULL,
1030 "pfthreshold", NULL, NULL, NULL,
1031 NULL, NULL, NULL, NULL,
1032 NULL, NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL,
1034 NULL, NULL, NULL, NULL,
1035 },
1036 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
1037 .tcg_features = TCG_SVM_FEATURES,
1038 },
1039 [FEAT_7_0_EBX] = {
1040 .type = CPUID_FEATURE_WORD,
1041 .feat_names = {
1042 "fsgsbase", "tsc-adjust", NULL, "bmi1",
1043 "hle", "avx2", NULL, "smep",
1044 "bmi2", "erms", "invpcid", "rtm",
1045 NULL, NULL, "mpx", NULL,
1046 "avx512f", "avx512dq", "rdseed", "adx",
1047 "smap", "avx512ifma", "pcommit", "clflushopt",
1048 "clwb", "intel-pt", "avx512pf", "avx512er",
1049 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1050 },
1051 .cpuid = {
1052 .eax = 7,
1053 .needs_ecx = true, .ecx = 0,
1054 .reg = R_EBX,
1055 },
1056 .tcg_features = TCG_7_0_EBX_FEATURES,
1057 },
1058 [FEAT_7_0_ECX] = {
1059 .type = CPUID_FEATURE_WORD,
1060 .feat_names = {
1061 NULL, "avx512vbmi", "umip", "pku",
1062 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
1063 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1064 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
1065 "la57", NULL, NULL, NULL,
1066 NULL, NULL, "rdpid", NULL,
1067 NULL, "cldemote", NULL, "movdiri",
1068 "movdir64b", NULL, NULL, NULL,
1069 },
1070 .cpuid = {
1071 .eax = 7,
1072 .needs_ecx = true, .ecx = 0,
1073 .reg = R_ECX,
1074 },
1075 .tcg_features = TCG_7_0_ECX_FEATURES,
1076 },
1077 [FEAT_7_0_EDX] = {
1078 .type = CPUID_FEATURE_WORD,
1079 .feat_names = {
1080 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1081 NULL, NULL, NULL, NULL,
1082 NULL, NULL, "md-clear", NULL,
1083 NULL, NULL, NULL, NULL,
1084 NULL, NULL, NULL, NULL,
1085 NULL, NULL, NULL, NULL,
1086 NULL, NULL, "spec-ctrl", "stibp",
1087 NULL, "arch-capabilities", NULL, "ssbd",
1088 },
1089 .cpuid = {
1090 .eax = 7,
1091 .needs_ecx = true, .ecx = 0,
1092 .reg = R_EDX,
1093 },
1094 .tcg_features = TCG_7_0_EDX_FEATURES,
1095 },
1096 [FEAT_8000_0007_EDX] = {
1097 .type = CPUID_FEATURE_WORD,
1098 .feat_names = {
1099 NULL, NULL, NULL, NULL,
1100 NULL, NULL, NULL, NULL,
1101 "invtsc", NULL, NULL, NULL,
1102 NULL, NULL, NULL, NULL,
1103 NULL, NULL, NULL, NULL,
1104 NULL, NULL, NULL, NULL,
1105 NULL, NULL, NULL, NULL,
1106 NULL, NULL, NULL, NULL,
1107 },
1108 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1109 .tcg_features = TCG_APM_FEATURES,
1110 .unmigratable_flags = CPUID_APM_INVTSC,
1111 },
1112 [FEAT_8000_0008_EBX] = {
1113 .type = CPUID_FEATURE_WORD,
1114 .feat_names = {
1115 NULL, NULL, NULL, NULL,
1116 NULL, NULL, NULL, NULL,
1117 NULL, "wbnoinvd", NULL, NULL,
1118 "ibpb", NULL, NULL, NULL,
1119 NULL, NULL, NULL, NULL,
1120 NULL, NULL, NULL, NULL,
1121 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1122 NULL, NULL, NULL, NULL,
1123 },
1124 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1125 .tcg_features = 0,
1126 .unmigratable_flags = 0,
1127 },
1128 [FEAT_XSAVE] = {
1129 .type = CPUID_FEATURE_WORD,
1130 .feat_names = {
1131 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1132 NULL, NULL, NULL, NULL,
1133 NULL, NULL, NULL, NULL,
1134 NULL, NULL, NULL, NULL,
1135 NULL, NULL, NULL, NULL,
1136 NULL, NULL, NULL, NULL,
1137 NULL, NULL, NULL, NULL,
1138 NULL, NULL, NULL, NULL,
1139 },
1140 .cpuid = {
1141 .eax = 0xd,
1142 .needs_ecx = true, .ecx = 1,
1143 .reg = R_EAX,
1144 },
1145 .tcg_features = TCG_XSAVE_FEATURES,
1146 },
1147 [FEAT_6_EAX] = {
1148 .type = CPUID_FEATURE_WORD,
1149 .feat_names = {
1150 NULL, NULL, "arat", NULL,
1151 NULL, NULL, NULL, NULL,
1152 NULL, NULL, NULL, NULL,
1153 NULL, NULL, NULL, NULL,
1154 NULL, NULL, NULL, NULL,
1155 NULL, NULL, NULL, NULL,
1156 NULL, NULL, NULL, NULL,
1157 NULL, NULL, NULL, NULL,
1158 },
1159 .cpuid = { .eax = 6, .reg = R_EAX, },
1160 .tcg_features = TCG_6_EAX_FEATURES,
1161 },
1162 [FEAT_XSAVE_COMP_LO] = {
1163 .type = CPUID_FEATURE_WORD,
1164 .cpuid = {
1165 .eax = 0xD,
1166 .needs_ecx = true, .ecx = 0,
1167 .reg = R_EAX,
1168 },
1169 .tcg_features = ~0U,
1170 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1171 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1172 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1173 XSTATE_PKRU_MASK,
1174 },
1175 [FEAT_XSAVE_COMP_HI] = {
1176 .type = CPUID_FEATURE_WORD,
1177 .cpuid = {
1178 .eax = 0xD,
1179 .needs_ecx = true, .ecx = 0,
1180 .reg = R_EDX,
1181 },
1182 .tcg_features = ~0U,
1183 },
1184 /*Below are MSR exposed features*/
1185 [FEAT_ARCH_CAPABILITIES] = {
1186 .type = MSR_FEATURE_WORD,
1187 .feat_names = {
1188 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1189 "ssb-no", "mds-no", NULL, NULL,
1190 NULL, NULL, NULL, NULL,
1191 NULL, NULL, NULL, NULL,
1192 NULL, NULL, NULL, NULL,
1193 NULL, NULL, NULL, NULL,
1194 NULL, NULL, NULL, NULL,
1195 NULL, NULL, NULL, NULL,
1196 },
1197 .msr = {
1198 .index = MSR_IA32_ARCH_CAPABILITIES,
1199 .cpuid_dep = {
1200 FEAT_7_0_EDX,
1201 CPUID_7_0_EDX_ARCH_CAPABILITIES
1202 }
1203 },
1204 },
1205 };
1206
/*
 * Association of a 32-bit x86 register with its printable name and the
 * corresponding QAPI enum value (used when reporting CPUID data via QMP).
 */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
1213
/*
 * Table of 32-bit register names/QAPI enum values, indexed by the R_*
 * register constants (R_EAX, R_ECX, ...).  The REGISTER() helper macro
 * generates one entry per register and is undefined right after use.
 */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
1227
/*
 * Description of one XSAVE state component: the CPUID feature bit that
 * makes it available, and where it lives inside the XSAVE area.
 */
typedef struct ExtSaveArea {
    uint32_t feature, bits;   /* feature word index and required bits */
    uint32_t offset, size;    /* placement within the XSAVE area, in bytes */
} ExtSaveArea;
1232
/*
 * XSAVE state components indexed by XSTATE_*_BIT.  Each entry names the
 * CPUID feature bit that enables the component and its offset/size,
 * taken from QEMU's X86XSaveArea layout.
 */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
1277
1278 static uint32_t xsave_area_size(uint64_t mask)
1279 {
1280 int i;
1281 uint64_t ret = 0;
1282
1283 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1284 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1285 if ((mask >> i) & 1) {
1286 ret = MAX(ret, esa->offset + esa->size);
1287 }
1288 }
1289 return ret;
1290 }
1291
/*
 * Whether the current accelerator bases guest CPUID data on the host
 * CPU (true for KVM and HVF; false for e.g. TCG).
 */
static inline bool accel_uses_host_cpuid(void)
{
    return kvm_enabled() || hvf_enabled();
}
1296
1297 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1298 {
1299 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1300 cpu->env.features[FEAT_XSAVE_COMP_LO];
1301 }
1302
1303 const char *get_register_name_32(unsigned int reg)
1304 {
1305 if (reg >= CPU_NB_REGS32) {
1306 return NULL;
1307 }
1308 return x86_reg_info_32[reg].name;
1309 }
1310
1311 /*
1312 * Returns the set of feature flags that are supported and migratable by
1313 * QEMU, for a given FeatureWord.
1314 */
1315 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1316 {
1317 FeatureWordInfo *wi = &feature_word_info[w];
1318 uint32_t r = 0;
1319 int i;
1320
1321 for (i = 0; i < 32; i++) {
1322 uint32_t f = 1U << i;
1323
1324 /* If the feature name is known, it is implicitly considered migratable,
1325 * unless it is explicitly set in unmigratable_flags */
1326 if ((wi->migratable_flags & f) ||
1327 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1328 r |= f;
1329 }
1330 }
1331 return r;
1332 }
1333
1334 void host_cpuid(uint32_t function, uint32_t count,
1335 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1336 {
1337 uint32_t vec[4];
1338
1339 #ifdef __x86_64__
1340 asm volatile("cpuid"
1341 : "=a"(vec[0]), "=b"(vec[1]),
1342 "=c"(vec[2]), "=d"(vec[3])
1343 : "0"(function), "c"(count) : "cc");
1344 #elif defined(__i386__)
1345 asm volatile("pusha \n\t"
1346 "cpuid \n\t"
1347 "mov %%eax, 0(%2) \n\t"
1348 "mov %%ebx, 4(%2) \n\t"
1349 "mov %%ecx, 8(%2) \n\t"
1350 "mov %%edx, 12(%2) \n\t"
1351 "popa"
1352 : : "a"(function), "c"(count), "S"(vec)
1353 : "memory", "cc");
1354 #else
1355 abort();
1356 #endif
1357
1358 if (eax)
1359 *eax = vec[0];
1360 if (ebx)
1361 *ebx = vec[1];
1362 if (ecx)
1363 *ecx = vec[2];
1364 if (edx)
1365 *edx = vec[3];
1366 }
1367
/*
 * Read the host CPU's vendor string, family, model and stepping via
 * CPUID leaves 0 and 1.
 *
 * @vendor receives the 12-character vendor string plus NUL terminator;
 * @family/@model/@stepping may be NULL if the caller doesn't need them.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        /* base family (EAX bits 11:8) + extended family (bits 27:20) */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        /* base model (EAX bits 7:4) with extended model (bits 19:16)
         * shifted into the high nibble */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        /* stepping is EAX bits 3:0 */
        *stepping = eax & 0x0F;
    }
}
1386
1387 /* CPU class name definitions: */
1388
/*
 * Build the QOM type name for CPU model @model_name.
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    char *typename = g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);

    return typename;
}
1396
1397 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1398 {
1399 ObjectClass *oc;
1400 char *typename = x86_cpu_type_name(cpu_model);
1401 oc = object_class_by_name(typename);
1402 g_free(typename);
1403 return oc;
1404 }
1405
1406 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1407 {
1408 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1409 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1410 return g_strndup(class_name,
1411 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1412 }
1413
/* Static definition of a built-in CPU model */
struct X86CPUDefinition {
    const char *name;            /* CPU model name, e.g. "qemu64" */
    uint32_t level;              /* value reported as the maximum basic CPUID leaf */
    uint32_t xlevel;             /* value reported as the maximum extended CPUID leaf */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;   /* default feature bits per feature word */
    const char *model_id;        /* human-readable model-ID string */
    CPUCaches *cache_info;       /* optional cache description (e.g. EPYC); NULL otherwise */
};
1427
/* Cache hierarchy advertised by the AMD EPYC CPU model */
static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DATA_CACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = INSTRUCTION_CACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 8192,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};
1477
1478 static X86CPUDefinition builtin_x86_defs[] = {
1479 {
1480 .name = "qemu64",
1481 .level = 0xd,
1482 .vendor = CPUID_VENDOR_AMD,
1483 .family = 6,
1484 .model = 6,
1485 .stepping = 3,
1486 .features[FEAT_1_EDX] =
1487 PPRO_FEATURES |
1488 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1489 CPUID_PSE36,
1490 .features[FEAT_1_ECX] =
1491 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1492 .features[FEAT_8000_0001_EDX] =
1493 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1494 .features[FEAT_8000_0001_ECX] =
1495 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1496 .xlevel = 0x8000000A,
1497 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1498 },
1499 {
1500 .name = "phenom",
1501 .level = 5,
1502 .vendor = CPUID_VENDOR_AMD,
1503 .family = 16,
1504 .model = 2,
1505 .stepping = 3,
1506 /* Missing: CPUID_HT */
1507 .features[FEAT_1_EDX] =
1508 PPRO_FEATURES |
1509 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1510 CPUID_PSE36 | CPUID_VME,
1511 .features[FEAT_1_ECX] =
1512 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1513 CPUID_EXT_POPCNT,
1514 .features[FEAT_8000_0001_EDX] =
1515 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1516 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1517 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1518 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1519 CPUID_EXT3_CR8LEG,
1520 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1521 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1522 .features[FEAT_8000_0001_ECX] =
1523 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1524 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1525 /* Missing: CPUID_SVM_LBRV */
1526 .features[FEAT_SVM] =
1527 CPUID_SVM_NPT,
1528 .xlevel = 0x8000001A,
1529 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1530 },
1531 {
1532 .name = "core2duo",
1533 .level = 10,
1534 .vendor = CPUID_VENDOR_INTEL,
1535 .family = 6,
1536 .model = 15,
1537 .stepping = 11,
1538 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1539 .features[FEAT_1_EDX] =
1540 PPRO_FEATURES |
1541 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1542 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1543 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1544 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1545 .features[FEAT_1_ECX] =
1546 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1547 CPUID_EXT_CX16,
1548 .features[FEAT_8000_0001_EDX] =
1549 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1550 .features[FEAT_8000_0001_ECX] =
1551 CPUID_EXT3_LAHF_LM,
1552 .xlevel = 0x80000008,
1553 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1554 },
1555 {
1556 .name = "kvm64",
1557 .level = 0xd,
1558 .vendor = CPUID_VENDOR_INTEL,
1559 .family = 15,
1560 .model = 6,
1561 .stepping = 1,
1562 /* Missing: CPUID_HT */
1563 .features[FEAT_1_EDX] =
1564 PPRO_FEATURES | CPUID_VME |
1565 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1566 CPUID_PSE36,
1567 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1568 .features[FEAT_1_ECX] =
1569 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1570 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1571 .features[FEAT_8000_0001_EDX] =
1572 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1573 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1574 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1575 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1576 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1577 .features[FEAT_8000_0001_ECX] =
1578 0,
1579 .xlevel = 0x80000008,
1580 .model_id = "Common KVM processor"
1581 },
1582 {
1583 .name = "qemu32",
1584 .level = 4,
1585 .vendor = CPUID_VENDOR_INTEL,
1586 .family = 6,
1587 .model = 6,
1588 .stepping = 3,
1589 .features[FEAT_1_EDX] =
1590 PPRO_FEATURES,
1591 .features[FEAT_1_ECX] =
1592 CPUID_EXT_SSE3,
1593 .xlevel = 0x80000004,
1594 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1595 },
1596 {
1597 .name = "kvm32",
1598 .level = 5,
1599 .vendor = CPUID_VENDOR_INTEL,
1600 .family = 15,
1601 .model = 6,
1602 .stepping = 1,
1603 .features[FEAT_1_EDX] =
1604 PPRO_FEATURES | CPUID_VME |
1605 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1606 .features[FEAT_1_ECX] =
1607 CPUID_EXT_SSE3,
1608 .features[FEAT_8000_0001_ECX] =
1609 0,
1610 .xlevel = 0x80000008,
1611 .model_id = "Common 32-bit KVM processor"
1612 },
1613 {
1614 .name = "coreduo",
1615 .level = 10,
1616 .vendor = CPUID_VENDOR_INTEL,
1617 .family = 6,
1618 .model = 14,
1619 .stepping = 8,
1620 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1621 .features[FEAT_1_EDX] =
1622 PPRO_FEATURES | CPUID_VME |
1623 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1624 CPUID_SS,
1625 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1626 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1627 .features[FEAT_1_ECX] =
1628 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1629 .features[FEAT_8000_0001_EDX] =
1630 CPUID_EXT2_NX,
1631 .xlevel = 0x80000008,
1632 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1633 },
1634 {
1635 .name = "486",
1636 .level = 1,
1637 .vendor = CPUID_VENDOR_INTEL,
1638 .family = 4,
1639 .model = 8,
1640 .stepping = 0,
1641 .features[FEAT_1_EDX] =
1642 I486_FEATURES,
1643 .xlevel = 0,
1644 .model_id = "",
1645 },
1646 {
1647 .name = "pentium",
1648 .level = 1,
1649 .vendor = CPUID_VENDOR_INTEL,
1650 .family = 5,
1651 .model = 4,
1652 .stepping = 3,
1653 .features[FEAT_1_EDX] =
1654 PENTIUM_FEATURES,
1655 .xlevel = 0,
1656 .model_id = "",
1657 },
1658 {
1659 .name = "pentium2",
1660 .level = 2,
1661 .vendor = CPUID_VENDOR_INTEL,
1662 .family = 6,
1663 .model = 5,
1664 .stepping = 2,
1665 .features[FEAT_1_EDX] =
1666 PENTIUM2_FEATURES,
1667 .xlevel = 0,
1668 .model_id = "",
1669 },
1670 {
1671 .name = "pentium3",
1672 .level = 3,
1673 .vendor = CPUID_VENDOR_INTEL,
1674 .family = 6,
1675 .model = 7,
1676 .stepping = 3,
1677 .features[FEAT_1_EDX] =
1678 PENTIUM3_FEATURES,
1679 .xlevel = 0,
1680 .model_id = "",
1681 },
1682 {
1683 .name = "athlon",
1684 .level = 2,
1685 .vendor = CPUID_VENDOR_AMD,
1686 .family = 6,
1687 .model = 2,
1688 .stepping = 3,
1689 .features[FEAT_1_EDX] =
1690 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1691 CPUID_MCA,
1692 .features[FEAT_8000_0001_EDX] =
1693 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1694 .xlevel = 0x80000008,
1695 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1696 },
1697 {
1698 .name = "n270",
1699 .level = 10,
1700 .vendor = CPUID_VENDOR_INTEL,
1701 .family = 6,
1702 .model = 28,
1703 .stepping = 2,
1704 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1705 .features[FEAT_1_EDX] =
1706 PPRO_FEATURES |
1707 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1708 CPUID_ACPI | CPUID_SS,
1709 /* Some CPUs got no CPUID_SEP */
1710 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1711 * CPUID_EXT_XTPR */
1712 .features[FEAT_1_ECX] =
1713 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1714 CPUID_EXT_MOVBE,
1715 .features[FEAT_8000_0001_EDX] =
1716 CPUID_EXT2_NX,
1717 .features[FEAT_8000_0001_ECX] =
1718 CPUID_EXT3_LAHF_LM,
1719 .xlevel = 0x80000008,
1720 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1721 },
1722 {
1723 .name = "Conroe",
1724 .level = 10,
1725 .vendor = CPUID_VENDOR_INTEL,
1726 .family = 6,
1727 .model = 15,
1728 .stepping = 3,
1729 .features[FEAT_1_EDX] =
1730 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1731 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1732 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1733 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1734 CPUID_DE | CPUID_FP87,
1735 .features[FEAT_1_ECX] =
1736 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1737 .features[FEAT_8000_0001_EDX] =
1738 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1739 .features[FEAT_8000_0001_ECX] =
1740 CPUID_EXT3_LAHF_LM,
1741 .xlevel = 0x80000008,
1742 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1743 },
1744 {
1745 .name = "Penryn",
1746 .level = 10,
1747 .vendor = CPUID_VENDOR_INTEL,
1748 .family = 6,
1749 .model = 23,
1750 .stepping = 3,
1751 .features[FEAT_1_EDX] =
1752 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1753 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1754 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1755 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1756 CPUID_DE | CPUID_FP87,
1757 .features[FEAT_1_ECX] =
1758 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1759 CPUID_EXT_SSE3,
1760 .features[FEAT_8000_0001_EDX] =
1761 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1762 .features[FEAT_8000_0001_ECX] =
1763 CPUID_EXT3_LAHF_LM,
1764 .xlevel = 0x80000008,
1765 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1766 },
1767 {
1768 .name = "Nehalem",
1769 .level = 11,
1770 .vendor = CPUID_VENDOR_INTEL,
1771 .family = 6,
1772 .model = 26,
1773 .stepping = 3,
1774 .features[FEAT_1_EDX] =
1775 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1776 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1777 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1778 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1779 CPUID_DE | CPUID_FP87,
1780 .features[FEAT_1_ECX] =
1781 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1782 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1783 .features[FEAT_8000_0001_EDX] =
1784 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1785 .features[FEAT_8000_0001_ECX] =
1786 CPUID_EXT3_LAHF_LM,
1787 .xlevel = 0x80000008,
1788 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1789 },
1790 {
1791 .name = "Nehalem-IBRS",
1792 .level = 11,
1793 .vendor = CPUID_VENDOR_INTEL,
1794 .family = 6,
1795 .model = 26,
1796 .stepping = 3,
1797 .features[FEAT_1_EDX] =
1798 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1799 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1800 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1801 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1802 CPUID_DE | CPUID_FP87,
1803 .features[FEAT_1_ECX] =
1804 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1805 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1806 .features[FEAT_7_0_EDX] =
1807 CPUID_7_0_EDX_SPEC_CTRL,
1808 .features[FEAT_8000_0001_EDX] =
1809 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1810 .features[FEAT_8000_0001_ECX] =
1811 CPUID_EXT3_LAHF_LM,
1812 .xlevel = 0x80000008,
1813 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1814 },
1815 {
1816 .name = "Westmere",
1817 .level = 11,
1818 .vendor = CPUID_VENDOR_INTEL,
1819 .family = 6,
1820 .model = 44,
1821 .stepping = 1,
1822 .features[FEAT_1_EDX] =
1823 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1824 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1825 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1826 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1827 CPUID_DE | CPUID_FP87,
1828 .features[FEAT_1_ECX] =
1829 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1830 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1831 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1832 .features[FEAT_8000_0001_EDX] =
1833 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1834 .features[FEAT_8000_0001_ECX] =
1835 CPUID_EXT3_LAHF_LM,
1836 .features[FEAT_6_EAX] =
1837 CPUID_6_EAX_ARAT,
1838 .xlevel = 0x80000008,
1839 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1840 },
1841 {
1842 .name = "Westmere-IBRS",
1843 .level = 11,
1844 .vendor = CPUID_VENDOR_INTEL,
1845 .family = 6,
1846 .model = 44,
1847 .stepping = 1,
1848 .features[FEAT_1_EDX] =
1849 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1850 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1851 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1852 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1853 CPUID_DE | CPUID_FP87,
1854 .features[FEAT_1_ECX] =
1855 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1856 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1857 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1858 .features[FEAT_8000_0001_EDX] =
1859 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1860 .features[FEAT_8000_0001_ECX] =
1861 CPUID_EXT3_LAHF_LM,
1862 .features[FEAT_7_0_EDX] =
1863 CPUID_7_0_EDX_SPEC_CTRL,
1864 .features[FEAT_6_EAX] =
1865 CPUID_6_EAX_ARAT,
1866 .xlevel = 0x80000008,
1867 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1868 },
1869 {
1870 .name = "SandyBridge",
1871 .level = 0xd,
1872 .vendor = CPUID_VENDOR_INTEL,
1873 .family = 6,
1874 .model = 42,
1875 .stepping = 1,
1876 .features[FEAT_1_EDX] =
1877 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1878 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1879 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1880 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1881 CPUID_DE | CPUID_FP87,
1882 .features[FEAT_1_ECX] =
1883 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1884 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1885 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1886 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1887 CPUID_EXT_SSE3,
1888 .features[FEAT_8000_0001_EDX] =
1889 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1890 CPUID_EXT2_SYSCALL,
1891 .features[FEAT_8000_0001_ECX] =
1892 CPUID_EXT3_LAHF_LM,
1893 .features[FEAT_XSAVE] =
1894 CPUID_XSAVE_XSAVEOPT,
1895 .features[FEAT_6_EAX] =
1896 CPUID_6_EAX_ARAT,
1897 .xlevel = 0x80000008,
1898 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1899 },
    {
        /* Identical to "SandyBridge" except for the addition of
         * CPUID_7_0_EDX_SPEC_CTRL (IBRS/IBPB speculation control). */
        .name = "SandyBridge-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 42,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
    },
    {
        /* Intel 3rd-generation Core (Ivy Bridge), family 6 model 58.
         * Adds F16C and RDRAND in CPUID.1.ECX, plus FSGSBASE, SMEP and
         * ERMS in CPUID.7.EBX, over "SandyBridge". */
        .name = "IvyBridge",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 58,
        .stepping = 9,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_ERMS,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
    },
    {
        /* Identical to "IvyBridge" except for the addition of
         * CPUID_7_0_EDX_SPEC_CTRL (IBRS/IBPB speculation control). */
        .name = "IvyBridge-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 58,
        .stepping = 9,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
            CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
            CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
            CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_ERMS,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
    },
    {
        /* Intel 4th-generation Core (Haswell), family 6 model 60, with
         * the TSX bits (HLE/RTM) left out — compare with "Haswell" below.
         * Adds FMA, MOVBE, PCID, BMI1/BMI2, AVX2 and INVPCID over
         * "IvyBridge". */
        .name = "Haswell-noTSX",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, no TSX)",
    },
    {
        /* Identical to "Haswell-noTSX" except for the addition of
         * CPUID_7_0_EDX_SPEC_CTRL (IBRS/IBPB speculation control). */
        .name = "Haswell-noTSX-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 1,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
    },
    {
        /* Intel 4th-generation Core (Haswell), family 6 model 60,
         * stepping 4: same as "Haswell-noTSX" plus the TSX bits
         * (CPUID_7_0_EBX_HLE and CPUID_7_0_EBX_RTM). */
        .name = "Haswell",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 4,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell)",
    },
    {
        /* Identical to "Haswell" except for the addition of
         * CPUID_7_0_EDX_SPEC_CTRL (IBRS/IBPB speculation control). */
        .name = "Haswell-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 60,
        .stepping = 4,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Haswell, IBRS)",
    },
    {
        /* Intel 5th-generation Core (Broadwell), family 6 model 61,
         * without the TSX bits (HLE/RTM) — compare with "Broadwell"
         * below.  Adds RDSEED, ADX, SMAP and 3DNOWPREFETCH over the
         * Haswell feature set. */
        .name = "Broadwell-noTSX",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, no TSX)",
    },
    {
        /* Identical to "Broadwell-noTSX" except for the addition of
         * CPUID_7_0_EDX_SPEC_CTRL (IBRS/IBPB speculation control). */
        .name = "Broadwell-noTSX-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
    },
    {
        /* Intel 5th-generation Core (Broadwell), family 6 model 61:
         * same as "Broadwell-noTSX" plus the TSX bits
         * (CPUID_7_0_EBX_HLE and CPUID_7_0_EBX_RTM). */
        .name = "Broadwell",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell)",
    },
    {
        /* Identical to "Broadwell" except for the addition of
         * CPUID_7_0_EDX_SPEC_CTRL (IBRS/IBPB speculation control). */
        .name = "Broadwell-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 61,
        .stepping = 2,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Broadwell, IBRS)",
    },
    {
        /* Intel 6th-generation Core (Skylake client), family 6 model 94.
         * Adds XSAVEC and XGETBV1 to the XSAVE leaf over "Broadwell". */
        .name = "Skylake-Client",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 94,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Skylake)",
    },
    {
        /* Identical to "Skylake-Client" except for the addition of
         * CPUID_7_0_EDX_SPEC_CTRL (IBRS/IBPB speculation control). */
        .name = "Skylake-Client-IBRS",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 94,
        .stepping = 3,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Skylake, IBRS)",
    },
    {
        /* Intel Xeon Scalable (Skylake server), family 6 model 85.
         * Adds the server AVX-512 set (F/DQ/BW/CD/VL), CLWB,
         * CLFLUSHOPT, PKU and 1GB pages (PDPE1GB) over the client
         * Skylake model. */
        .name = "Skylake-Server",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 85,
        .stepping = 4,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_PKU,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Skylake)",
    },
2451 {
2452 .name = "Skylake-Server-IBRS",
2453 .level = 0xd,
2454 .vendor = CPUID_VENDOR_INTEL,
2455 .family = 6,
2456 .model = 85,
2457 .stepping = 4,
2458 .features[FEAT_1_EDX] =
2459 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2460 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2461 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2462 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2463 CPUID_DE | CPUID_FP87,
2464 .features[FEAT_1_ECX] =
2465 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2466 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2467 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2468 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2469 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2470 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2471 .features[FEAT_8000_0001_EDX] =
2472 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2473 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2474 .features[FEAT_8000_0001_ECX] =
2475 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2476 .features[FEAT_7_0_EDX] =
2477 CPUID_7_0_EDX_SPEC_CTRL,
2478 .features[FEAT_7_0_EBX] =
2479 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2480 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2481 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2482 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2483 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2484 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2485 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2486 CPUID_7_0_EBX_AVX512VL,
2487 .features[FEAT_7_0_ECX] =
2488 CPUID_7_0_ECX_PKU,
2489 /* Missing: XSAVES (not supported by some Linux versions,
2490 * including v4.1 to v4.12).
2491 * KVM doesn't yet expose any XSAVES state save component,
2492 * and the only one defined in Skylake (processor tracing)
2493 * probably will block migration anyway.
2494 */
2495 .features[FEAT_XSAVE] =
2496 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2497 CPUID_XSAVE_XGETBV1,
2498 .features[FEAT_6_EAX] =
2499 CPUID_6_EAX_ARAT,
2500 .xlevel = 0x80000008,
2501 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2502 },
    {
        /* Intel Xeon Scalable 2nd-gen (Cascade Lake), family 6 model 85
         * stepping 6: "Skylake-Server" feature set plus AVX512VNNI and
         * the speculation-control bits SPEC_CTRL and SSBD. */
        .name = "Cascadelake-Server",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 85,
        .stepping = 6,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_PKU |
            CPUID_7_0_ECX_AVX512VNNI,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Cascadelake)",
    },
    {
        /* Intel Ice Lake client, family 6 model 126.  Adds the VBMI /
         * VBMI2 / GFNI / VAES / VPCLMULQDQ / AVX512VNNI / BITALG /
         * VPOPCNTDQ group plus UMIP, and WBNOINVD in leaf 0x80000008.
         * Unlike the server models here, no AVX-512 F/DQ/BW/CD/VL bits
         * are set in CPUID.7.EBX. */
        .name = "Icelake-Client",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 126,
        .stepping = 0,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
            CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_8000_0008_EBX] =
            CPUID_8000_0008_EBX_WBNOINVD,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
            CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
            CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
            CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
            CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Core Processor (Icelake)",
    },
    {
        /* Intel Ice Lake server, family 6 model 134.  Adds the full
         * AVX-512 server set (F/DQ/BW/CD/VL), CLWB/CLFLUSHOPT, 1GB
         * pages (PDPE1GB) and 5-level paging (LA57) over
         * "Icelake-Client". */
        .name = "Icelake-Server",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 134,
        .stepping = 0,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
            CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
            CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
            CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
            CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_8000_0008_EBX] =
            CPUID_8000_0008_EBX_WBNOINVD,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
            CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
            CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
            CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
            CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
            CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
            CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
            CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
            CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
            CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
            CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
            CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
        /* Missing: XSAVES (not supported by some Linux versions,
         * including v4.1 to v4.12).
         * KVM doesn't yet expose any XSAVES state save component,
         * and the only one defined in Skylake (processor tracing)
         * probably will block migration anyway.
         */
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
            CPUID_XSAVE_XGETBV1,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Processor (Icelake)",
    },
    {
        /* Intel Xeon Phi (Knights Mill), family 6 model 133.  Carries
         * the Phi-specific AVX-512 extensions (PF, ER, 4VNNIW, 4FMAPS)
         * and is the only model in this table that sets CPUID_SS
         * (self-snoop) in CPUID.1.EDX. */
        .name = "KnightsMill",
        .level = 0xd,
        .vendor = CPUID_VENDOR_INTEL,
        .family = 6,
        .model = 133,
        .stepping = 0,
        .features[FEAT_1_EDX] =
            CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
            CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
            CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
            CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
            CPUID_PSE | CPUID_DE | CPUID_FP87,
        .features[FEAT_1_ECX] =
            CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
            CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
            CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
            CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
            CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
            CPUID_EXT_F16C | CPUID_EXT_RDRAND,
        .features[FEAT_8000_0001_EDX] =
            CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
            CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
        .features[FEAT_8000_0001_ECX] =
            CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
        .features[FEAT_7_0_EBX] =
            CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
            CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
            CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
            CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
            CPUID_7_0_EBX_AVX512ER,
        .features[FEAT_7_0_ECX] =
            CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
        .features[FEAT_7_0_EDX] =
            CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
        .features[FEAT_XSAVE] =
            CPUID_XSAVE_XSAVEOPT,
        .features[FEAT_6_EAX] =
            CPUID_6_EAX_ARAT,
        .xlevel = 0x80000008,
        .model_id = "Intel Xeon Phi Processor (Knights Mill)",
    },
2711 {
2712 .name = "Opteron_G1",
2713 .level = 5,
2714 .vendor = CPUID_VENDOR_AMD,
2715 .family = 15,
2716 .model = 6,
2717 .stepping = 1,
2718 .features[FEAT_1_EDX] =
2719 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2720 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2721 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2722 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2723 CPUID_DE | CPUID_FP87,
2724 .features[FEAT_1_ECX] =
2725 CPUID_EXT_SSE3,
2726 .features[FEAT_8000_0001_EDX] =
2727 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2728 .xlevel = 0x80000008,
2729 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2730 },
2731 {
2732 .name = "Opteron_G2",
2733 .level = 5,
2734 .vendor = CPUID_VENDOR_AMD,
2735 .family = 15,
2736 .model = 6,
2737 .stepping = 1,
2738 .features[FEAT_1_EDX] =
2739 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2740 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2741 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2742 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2743 CPUID_DE | CPUID_FP87,
2744 .features[FEAT_1_ECX] =
2745 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2746 .features[FEAT_8000_0001_EDX] =
2747 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2748 .features[FEAT_8000_0001_ECX] =
2749 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2750 .xlevel = 0x80000008,
2751 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2752 },
2753 {
2754 .name = "Opteron_G3",
2755 .level = 5,
2756 .vendor = CPUID_VENDOR_AMD,
2757 .family = 16,
2758 .model = 2,
2759 .stepping = 3,
2760 .features[FEAT_1_EDX] =
2761 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2762 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2763 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2764 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2765 CPUID_DE | CPUID_FP87,
2766 .features[FEAT_1_ECX] =
2767 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2768 CPUID_EXT_SSE3,
2769 .features[FEAT_8000_0001_EDX] =
2770 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
2771 CPUID_EXT2_RDTSCP,
2772 .features[FEAT_8000_0001_ECX] =
2773 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2774 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2775 .xlevel = 0x80000008,
2776 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2777 },
2778 {
2779 .name = "Opteron_G4",
2780 .level = 0xd,
2781 .vendor = CPUID_VENDOR_AMD,
2782 .family = 21,
2783 .model = 1,
2784 .stepping = 2,
2785 .features[FEAT_1_EDX] =
2786 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2787 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2788 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2789 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2790 CPUID_DE | CPUID_FP87,
2791 .features[FEAT_1_ECX] =
2792 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2793 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2794 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2795 CPUID_EXT_SSE3,
2796 .features[FEAT_8000_0001_EDX] =
2797 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2798 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2799 .features[FEAT_8000_0001_ECX] =
2800 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2801 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2802 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2803 CPUID_EXT3_LAHF_LM,
2804 .features[FEAT_SVM] =
2805 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2806 /* no xsaveopt! */
2807 .xlevel = 0x8000001A,
2808 .model_id = "AMD Opteron 62xx class CPU",
2809 },
2810 {
2811 .name = "Opteron_G5",
2812 .level = 0xd,
2813 .vendor = CPUID_VENDOR_AMD,
2814 .family = 21,
2815 .model = 2,
2816 .stepping = 0,
2817 .features[FEAT_1_EDX] =
2818 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2819 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2820 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2821 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2822 CPUID_DE | CPUID_FP87,
2823 .features[FEAT_1_ECX] =
2824 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2825 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2826 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2827 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2828 .features[FEAT_8000_0001_EDX] =
2829 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2830 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2831 .features[FEAT_8000_0001_ECX] =
2832 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2833 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2834 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2835 CPUID_EXT3_LAHF_LM,
2836 .features[FEAT_SVM] =
2837 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2838 /* no xsaveopt! */
2839 .xlevel = 0x8000001A,
2840 .model_id = "AMD Opteron 63xx class CPU",
2841 },
2842 {
2843 .name = "EPYC",
2844 .level = 0xd,
2845 .vendor = CPUID_VENDOR_AMD,
2846 .family = 23,
2847 .model = 1,
2848 .stepping = 2,
2849 .features[FEAT_1_EDX] =
2850 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2851 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2852 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2853 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2854 CPUID_VME | CPUID_FP87,
2855 .features[FEAT_1_ECX] =
2856 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2857 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2858 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2859 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2860 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2861 .features[FEAT_8000_0001_EDX] =
2862 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2863 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2864 CPUID_EXT2_SYSCALL,
2865 .features[FEAT_8000_0001_ECX] =
2866 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2867 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2868 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2869 CPUID_EXT3_TOPOEXT,
2870 .features[FEAT_7_0_EBX] =
2871 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2872 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2873 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2874 CPUID_7_0_EBX_SHA_NI,
2875 /* Missing: XSAVES (not supported by some Linux versions,
2876 * including v4.1 to v4.12).
2877 * KVM doesn't yet expose any XSAVES state save component.
2878 */
2879 .features[FEAT_XSAVE] =
2880 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2881 CPUID_XSAVE_XGETBV1,
2882 .features[FEAT_6_EAX] =
2883 CPUID_6_EAX_ARAT,
2884 .features[FEAT_SVM] =
2885 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2886 .xlevel = 0x8000001E,
2887 .model_id = "AMD EPYC Processor",
2888 .cache_info = &epyc_cache_info,
2889 },
2890 {
2891 .name = "EPYC-IBPB",
2892 .level = 0xd,
2893 .vendor = CPUID_VENDOR_AMD,
2894 .family = 23,
2895 .model = 1,
2896 .stepping = 2,
2897 .features[FEAT_1_EDX] =
2898 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2899 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2900 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2901 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2902 CPUID_VME | CPUID_FP87,
2903 .features[FEAT_1_ECX] =
2904 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2905 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2906 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2907 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2908 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2909 .features[FEAT_8000_0001_EDX] =
2910 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2911 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2912 CPUID_EXT2_SYSCALL,
2913 .features[FEAT_8000_0001_ECX] =
2914 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2915 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2916 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2917 CPUID_EXT3_TOPOEXT,
2918 .features[FEAT_8000_0008_EBX] =
2919 CPUID_8000_0008_EBX_IBPB,
2920 .features[FEAT_7_0_EBX] =
2921 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2922 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2923 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2924 CPUID_7_0_EBX_SHA_NI,
2925 /* Missing: XSAVES (not supported by some Linux versions,
2926 * including v4.1 to v4.12).
2927 * KVM doesn't yet expose any XSAVES state save component.
2928 */
2929 .features[FEAT_XSAVE] =
2930 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2931 CPUID_XSAVE_XGETBV1,
2932 .features[FEAT_6_EAX] =
2933 CPUID_6_EAX_ARAT,
2934 .features[FEAT_SVM] =
2935 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2936 .xlevel = 0x8000001E,
2937 .model_id = "AMD EPYC Processor (with IBPB)",
2938 .cache_info = &epyc_cache_info,
2939 },
2940 {
2941 .name = "Dhyana",
2942 .level = 0xd,
2943 .vendor = CPUID_VENDOR_HYGON,
2944 .family = 24,
2945 .model = 0,
2946 .stepping = 1,
2947 .features[FEAT_1_EDX] =
2948 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2949 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2950 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2951 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2952 CPUID_VME | CPUID_FP87,
2953 .features[FEAT_1_ECX] =
2954 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2955 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
2956 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2957 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2958 CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
2959 .features[FEAT_8000_0001_EDX] =
2960 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2961 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2962 CPUID_EXT2_SYSCALL,
2963 .features[FEAT_8000_0001_ECX] =
2964 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2965 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2966 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2967 CPUID_EXT3_TOPOEXT,
2968 .features[FEAT_8000_0008_EBX] =
2969 CPUID_8000_0008_EBX_IBPB,
2970 .features[FEAT_7_0_EBX] =
2971 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2972 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2973 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
2974 /*
2975 * Missing: XSAVES (not supported by some Linux versions,
2976 * including v4.1 to v4.12).
2977 * KVM doesn't yet expose any XSAVES state save component.
2978 */
2979 .features[FEAT_XSAVE] =
2980 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2981 CPUID_XSAVE_XGETBV1,
2982 .features[FEAT_6_EAX] =
2983 CPUID_6_EAX_ARAT,
2984 .features[FEAT_SVM] =
2985 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2986 .xlevel = 0x8000001E,
2987 .model_id = "Hygon Dhyana Processor",
2988 .cache_info = &epyc_cache_info,
2989 },
2990 };
2991
/* A (property name, value) pair; used below for tables of default
 * property settings applied to CPU models (kvm_default_props,
 * tcg_default_props). */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
2995
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * Entries are (property, value) pairs: "on" enables the feature by
 * default, "off" disables it.  The table must end with the
 * { NULL, NULL } sentinel — the lookup loop in
 * x86_cpu_change_kvm_default() relies on it.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
3012
/* TCG-specific defaults that override all CPU models when using TCG.
 *
 * NOTE(review): "vme" is presumably turned off because TCG does not
 * implement it — confirm against TCG feature support.  Terminated by
 * the { NULL, NULL } sentinel like kvm_default_props.
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};
3019
3020
3021 void x86_cpu_change_kvm_default(const char *prop, const char *value)
3022 {
3023 PropValue *pv;
3024 for (pv = kvm_default_props; pv->prop; pv++) {
3025 if (!strcmp(pv->prop, prop)) {
3026 pv->value = value;
3027 break;
3028 }
3029 }
3030
3031 /* It is valid to call this function only for properties that
3032 * are already present in the kvm_default_props table.
3033 */
3034 assert(pv->prop);
3035 }
3036
3037 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3038 bool migratable_only);
3039
3040 static bool lmce_supported(void)
3041 {
3042 uint64_t mce_cap = 0;
3043
3044 #ifdef CONFIG_KVM
3045 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
3046 return false;
3047 }
3048 #endif
3049
3050 return !!(mce_cap & MCG_LMCE_P);
3051 }
3052
3053 #define CPUID_MODEL_ID_SZ 48
3054
3055 /**
3056 * cpu_x86_fill_model_id:
3057 * Get CPUID model ID string from host CPU.
3058 *
3059 * @str should have at least CPUID_MODEL_ID_SZ bytes
3060 *
3061 * The function does NOT add a null terminator to the string
3062 * automatically.
3063 */
static int cpu_x86_fill_model_id(char *str)
{
    /* The model ID string comes from CPUID leaves 0x80000002..0x80000004:
     * 16 bytes per leaf, packed into EAX/EBX/ECX/EDX in that order.
     */
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
3078
/* Properties specific to the "max" CPU model (see max_x86_cpu_initfn). */
static Property max_x86_cpu_properties[] = {
    /* default: on — see the "migratable" check note in max_x86_cpu_initfn */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    /* default: off */
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
3084
3085 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
3086 {
3087 DeviceClass *dc = DEVICE_CLASS(oc);
3088 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3089
3090 xcc->ordering = 9;
3091
3092 xcc->model_description =
3093 "Enables all features supported by the accelerator in the current host";
3094
3095 dc->props = max_x86_cpu_properties;
3096 }
3097
3098 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
3099
/*
 * Instance init for the "max" CPU model.
 *
 * With a host-CPUID-based accelerator (KVM/HVF) the vendor, family,
 * model, stepping and model-id are copied from the host CPU and the
 * minimum CPUID levels are taken from the accelerator; otherwise (TCG)
 * fixed placeholder values are used.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        /* NOTE(review): host_cpudef.vendor is filled in here but not
         * read afterwards — the "vendor" property below comes from
         * host_vendor_fms() instead.
         */
        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        host_vendor_fms(vendor, &family, &model, &stepping);

        cpu_x86_fill_model_id(model_id);

        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        if (kvm_enabled()) {
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            /* accel_uses_host_cpuid() && !kvm_enabled() implies HVF here */
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG: fixed placeholder identification. */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
3165
/* QOM type registration for the "max" CPU model. */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
3172
3173 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
/* Class init for the "host" CPU model (alias of "max" that additionally
 * requires a host-CPUID-based accelerator). */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* "host" is only usable when the accelerator exposes host CPUID. */
    xcc->host_cpuid_required = true;
    /* Sort just before "max" in -cpu help output. */
    xcc->ordering = 8;

    /* NOTE(review): both description strings end with a trailing space;
     * harmless but looks unintentional. */
#if defined(CONFIG_KVM)
    xcc->model_description =
        "KVM processor with all supported host features ";
#elif defined(CONFIG_HVF)
    xcc->model_description =
        "HVF processor with all supported host features ";
#endif
}
3189
/* QOM type registration for "host", which inherits from "max". */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
3195
3196 #endif
3197
3198 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
3199 {
3200 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
3201
3202 switch (f->type) {
3203 case CPUID_FEATURE_WORD:
3204 {
3205 const char *reg = get_register_name_32(f->cpuid.reg);
3206 assert(reg);
3207 return g_strdup_printf("CPUID.%02XH:%s",
3208 f->cpuid.eax, reg);
3209 }
3210 case MSR_FEATURE_WORD:
3211 return g_strdup_printf("MSR(%02XH)",
3212 f->msr.index);
3213 }
3214
3215 return NULL;
3216 }
3217
3218 static void report_unavailable_features(FeatureWord w, uint32_t mask)
3219 {
3220 FeatureWordInfo *f = &feature_word_info[w];
3221 int i;
3222 char *feat_word_str;
3223
3224 for (i = 0; i < 32; ++i) {
3225 if ((1UL << i) & mask) {
3226 feat_word_str = feature_word_description(f, i);
3227 warn_report("%s doesn't support requested feature: %s%s%s [bit %d]",
3228 accel_uses_host_cpuid() ? "host" : "TCG",
3229 feat_word_str,
3230 f->feat_names[i] ? "." : "",
3231 f->feat_names[i] ? f->feat_names[i] : "", i);
3232 g_free(feat_word_str);
3233 }
3234 }
3235 }
3236
3237 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
3238 const char *name, void *opaque,
3239 Error **errp)
3240 {
3241 X86CPU *cpu = X86_CPU(obj);
3242 CPUX86State *env = &cpu->env;
3243 int64_t value;
3244
3245 value = (env->cpuid_version >> 8) & 0xf;
3246 if (value == 0xf) {
3247 value += (env->cpuid_version >> 20) & 0xff;
3248 }
3249 visit_type_int(v, name, &value, errp);
3250 }
3251
/*
 * QOM setter for the "family" property.
 *
 * Accepts 0..0x10e (0xff + 0xf): values above 0x0f set the base family
 * field to 0xf and store the remainder in the extended-family field,
 * matching the CPUID leaf 1 EAX encoding.
 */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear family (bits 11:8) and extended family (bits 27:20). */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        /* Base family saturates at 0xf; excess goes to extended family. */
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
3281
3282 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
3283 const char *name, void *opaque,
3284 Error **errp)
3285 {
3286 X86CPU *cpu = X86_CPU(obj);
3287 CPUX86State *env = &cpu->env;
3288 int64_t value;
3289
3290 value = (env->cpuid_version >> 4) & 0xf;
3291 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
3292 visit_type_int(v, name, &value, errp);
3293 }
3294
3295 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
3296 const char *name, void *opaque,
3297 Error **errp)
3298 {
3299 X86CPU *cpu = X86_CPU(obj);
3300 CPUX86State *env = &cpu->env;
3301 const int64_t min = 0;
3302 const int64_t max = 0xff;
3303 Error *local_err = NULL;
3304 int64_t value;
3305
3306 visit_type_int(v, name, &value, &local_err);
3307 if (local_err) {
3308 error_propagate(errp, local_err);
3309 return;
3310 }
3311 if (value < min || value > max) {
3312 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3313 name ? name : "null", value, min, max);
3314 return;
3315 }
3316
3317 env->cpuid_version &= ~0xf00f0;
3318 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
3319 }
3320
3321 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
3322 const char *name, void *opaque,
3323 Error **errp)
3324 {
3325 X86CPU *cpu = X86_CPU(obj);
3326 CPUX86State *env = &cpu->env;
3327 int64_t value;
3328
3329 value = env->cpuid_version & 0xf;
3330 visit_type_int(v, name, &value, errp);
3331 }
3332
3333 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
3334 const char *name, void *opaque,
3335 Error **errp)
3336 {
3337 X86CPU *cpu = X86_CPU(obj);
3338 CPUX86State *env = &cpu->env;
3339 const int64_t min = 0;
3340 const int64_t max = 0xf;
3341 Error *local_err = NULL;
3342 int64_t value;
3343
3344 visit_type_int(v, name, &value, &local_err);
3345 if (local_err) {
3346 error_propagate(errp, local_err);
3347 return;
3348 }
3349 if (value < min || value > max) {
3350 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3351 name ? name : "null", value, min, max);
3352 return;
3353 }
3354
3355 env->cpuid_version &= ~0xf;
3356 env->cpuid_version |= value & 0xf;
3357 }
3358
3359 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
3360 {
3361 X86CPU *cpu = X86_CPU(obj);
3362 CPUX86State *env = &cpu->env;
3363 char *value;
3364
3365 value = g_malloc(CPUID_VENDOR_SZ + 1);
3366 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
3367 env->cpuid_vendor3);
3368 return value;
3369 }
3370
3371 static void x86_cpuid_set_vendor(Object *obj, const char *value,
3372 Error **errp)
3373 {
3374 X86CPU *cpu = X86_CPU(obj);
3375 CPUX86State *env = &cpu->env;
3376 int i;
3377
3378 if (strlen(value) != CPUID_VENDOR_SZ) {
3379 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
3380 return;
3381 }
3382
3383 env->cpuid_vendor1 = 0;
3384 env->cpuid_vendor2 = 0;
3385 env->cpuid_vendor3 = 0;
3386 for (i = 0; i < 4; i++) {
3387 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3388 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3389 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3390 }
3391 }
3392
3393 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3394 {
3395 X86CPU *cpu = X86_CPU(obj);
3396 CPUX86State *env = &cpu->env;
3397 char *value;
3398 int i;
3399
3400 value = g_malloc(48 + 1);
3401 for (i = 0; i < 48; i++) {
3402 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3403 }
3404 value[48] = '\0';
3405 return value;
3406 }
3407
3408 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3409 Error **errp)
3410 {
3411 X86CPU *cpu = X86_CPU(obj);
3412 CPUX86State *env = &cpu->env;
3413 int c, len, i;
3414
3415 if (model_id == NULL) {
3416 model_id = "";
3417 }
3418 len = strlen(model_id);
3419 memset(env->cpuid_model, 0, 48);
3420 for (i = 0; i < 48; i++) {
3421 if (i >= len) {
3422 c = '\0';
3423 } else {
3424 c = (uint8_t)model_id[i];
3425 }
3426 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3427 }
3428 }
3429
3430 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3431 void *opaque, Error **errp)
3432 {
3433 X86CPU *cpu = X86_CPU(obj);
3434 int64_t value;
3435
3436 value = cpu->env.tsc_khz * 1000;
3437 visit_type_int(v, name, &value, errp);
3438 }
3439
3440 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3441 void *opaque, Error **errp)
3442 {
3443 X86CPU *cpu = X86_CPU(obj);
3444 const int64_t min = 0;
3445 const int64_t max = INT64_MAX;
3446 Error *local_err = NULL;
3447 int64_t value;
3448
3449 visit_type_int(v, name, &value, &local_err);
3450 if (local_err) {
3451 error_propagate(errp, local_err);
3452 return;
3453 }
3454 if (value < min || value > max) {
3455 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3456 name ? name : "null", value, min, max);
3457 return;
3458 }
3459
3460 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
3461 }
3462
/* Generic getter for "feature-words" and "filtered-features" properties.
 *
 * @opaque points at the uint32_t feature-word array to expose
 * (cpu->env.features or cpu->filtered_features).  Builds a linked list
 * of per-word info structures on the stack — list nodes reference
 * word_infos[] entries, so both arrays must outlive the visit call.
 */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /*
         * We didn't have MSR features when "feature-words" was
         * introduced. Therefore skipped other type entries.
         */
        if (wi->type != CPUID_FEATURE_WORD) {
            continue;
        }
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid.eax;
        qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid.ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
3498
3499 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3500 void *opaque, Error **errp)
3501 {
3502 X86CPU *cpu = X86_CPU(obj);
3503 int64_t value = cpu->hyperv_spinlock_attempts;
3504
3505 visit_type_int(v, name, &value, errp);
3506 }
3507
3508 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3509 void *opaque, Error **errp)
3510 {
3511 const int64_t min = 0xFFF;
3512 const int64_t max = UINT_MAX;
3513 X86CPU *cpu = X86_CPU(obj);
3514 Error *err = NULL;
3515 int64_t value;
3516
3517 visit_type_int(v, name, &value, &err);
3518 if (err) {
3519 error_propagate(errp, err);
3520 return;
3521 }
3522
3523 if (value < min || value > max) {
3524 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
3525 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
3526 object_get_typename(obj), name ? name : "null",
3527 value, min, max);
3528 return;
3529 }
3530 cpu->hyperv_spinlock_attempts = value;
3531 }
3532
/* PropertyInfo backing the "hv-spinlocks" qdev property. */
static const PropertyInfo qdev_prop_spinlocks = {
    .name  = "int",
    .get   = x86_get_hv_spinlocks,
    .set   = x86_set_hv_spinlocks,
};
3538
/* Convert all '_' in a feature string option name to '-', so the name
 * conforms to the QOM property naming rule ('-' instead of '_').
 * Modifies @s in place.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = s; *p != '\0'; p++) {
        if (*p == '_') {
            *p = '-';
        }
    }
}
3548
/* Return the feature property name for a feature flag bit.
 *
 * For the XSAVE component words (FEAT_XSAVE_COMP_LO/HI) the bit is
 * remapped to the feature that enables that save component, because
 * XSAVE components are automatically enabled by other features and have
 * no property name of their own.  May return NULL if the bit has no
 * name in feature_word_info[].
 */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        /* HI word bits are components 32..63. */
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            /* Redirect to the enabling feature's word and first bit. */
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}
3569
/* Compatibility hack to maintain legacy +-feat semantics,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;
3576
3577 static gint compare_string(gconstpointer a, gconstpointer b)
3578 {
3579 return g_strcmp0(a, b);
3580 }
3581
/* Parse "+feature,-feature,feature=foo" CPU feature string.
 *
 * Each "feature=value" (or bare "feature", meaning "on") is registered
 * as a global property default for @typename.  Legacy "+feat"/"-feat"
 * entries are collected into plus_features/minus_features for later
 * application.  Note the static guard: only the FIRST call ever parses;
 * subsequent calls return immediately.  @features is modified in place
 * by strtok().
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    /* Globals may only be registered once; later calls are no-ops. */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "name=value"; a bare name means "on". */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Warn when the same feature appears both as +/-feat and
         * feat=on|off — the +/- form wins (legacy semantics). */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" accepts size suffixes (e.g. 2G) and
         * maps to the "tsc-frequency" property. */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        /* NOTE(review): prop->driver stores @typename without copying —
         * the caller's string must stay alive. */
        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}
3671
3672 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3673 static int x86_cpu_filter_features(X86CPU *cpu);
3674
3675 /* Check for missing features that may prevent the CPU class from
3676 * running using the current machine and accelerator.
3677 */
3678 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3679 strList **missing_feats)
3680 {
3681 X86CPU *xc;
3682 FeatureWord w;
3683 Error *err = NULL;
3684 strList **next = missing_feats;
3685
3686 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3687 strList *new = g_new0(strList, 1);
3688 new->value = g_strdup("kvm");
3689 *missing_feats = new;
3690 return;
3691 }
3692
3693 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3694
3695 x86_cpu_expand_features(xc, &err);
3696 if (err) {
3697 /* Errors at x86_cpu_expand_features should never happen,
3698 * but in case it does, just report the model as not
3699 * runnable at all using the "type" property.
3700 */
3701 strList *new = g_new0(strList, 1);
3702 new->value = g_strdup("type");
3703 *next = new;
3704 next = &new->next;
3705 }
3706
3707 x86_cpu_filter_features(xc);
3708
3709 for (w = 0; w < FEATURE_WORDS; w++) {
3710 uint32_t filtered = xc->filtered_features[w];
3711 int i;
3712 for (i = 0; i < 32; i++) {
3713 if (filtered & (1UL << i)) {
3714 strList *new = g_new0(strList, 1);
3715 new->value = g_strdup(x86_cpu_feature_name(w, i));
3716 *next = new;
3717 next = &new->next;
3718 }
3719 }
3720 }
3721
3722 object_unref(OBJECT(xc));
3723 }
3724
3725 /* Print all cpuid feature names in featureset
3726 */
3727 static void listflags(GList *features)
3728 {
3729 size_t len = 0;
3730 GList *tmp;
3731
3732 for (tmp = features; tmp; tmp = tmp->next) {
3733 const char *name = tmp->data;
3734 if ((len + strlen(name) + 1) >= 75) {
3735 qemu_printf("\n");
3736 len = 0;
3737 }
3738 qemu_printf("%s%s", len == 0 ? " " : " ", name);
3739 len += strlen(name) + 1;
3740 }
3741 qemu_printf("\n");
3742 }
3743
3744 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3745 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3746 {
3747 ObjectClass *class_a = (ObjectClass *)a;
3748 ObjectClass *class_b = (ObjectClass *)b;
3749 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3750 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3751 char *name_a, *name_b;
3752 int ret;
3753
3754 if (cc_a->ordering != cc_b->ordering) {
3755 ret = cc_a->ordering - cc_b->ordering;
3756 } else {
3757 name_a = x86_cpu_class_get_model_name(cc_a);
3758 name_b = x86_cpu_class_get_model_name(cc_b);
3759 ret = strcmp(name_a, name_b);
3760 g_free(name_a);
3761 g_free(name_b);
3762 }
3763 return ret;
3764 }
3765
3766 static GSList *get_sorted_cpu_model_list(void)
3767 {
3768 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3769 list = g_slist_sort(list, x86_cpu_list_compare);
3770 return list;
3771 }
3772
3773 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3774 {
3775 ObjectClass *oc = data;
3776 X86CPUClass *cc = X86_CPU_CLASS(oc);
3777 char *name = x86_cpu_class_get_model_name(cc);
3778 const char *desc = cc->model_description;
3779 if (!desc && cc->cpu_def) {
3780 desc = cc->cpu_def->model_id;
3781 }
3782
3783 qemu_printf("x86 %-20s %-48s\n", name, desc);
3784 g_free(name);
3785 }
3786
3787 /* list available CPU models and flags */
3788 void x86_cpu_list(void)
3789 {
3790 int i, j;
3791 GSList *list;
3792 GList *names = NULL;
3793
3794 qemu_printf("Available CPUs:\n");
3795 list = get_sorted_cpu_model_list();
3796 g_slist_foreach(list, x86_cpu_list_entry, NULL);
3797 g_slist_free(list);
3798
3799 names = NULL;
3800 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3801 FeatureWordInfo *fw = &feature_word_info[i];
3802 for (j = 0; j < 32; j++) {
3803 if (fw->feat_names[j]) {
3804 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3805 }
3806 }
3807 }
3808
3809 names = g_list_sort(names, (GCompareFunc)strcmp);
3810
3811 qemu_printf("\nRecognized CPUID flags:\n");
3812 listflags(names);
3813 qemu_printf("\n");
3814 g_list_free(names);
3815 }
3816
3817 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3818 {
3819 ObjectClass *oc = data;
3820 X86CPUClass *cc = X86_CPU_CLASS(oc);
3821 CpuDefinitionInfoList **cpu_list = user_data;
3822 CpuDefinitionInfoList *entry;
3823 CpuDefinitionInfo *info;
3824
3825 info = g_malloc0(sizeof(*info));
3826 info->name = x86_cpu_class_get_model_name(cc);
3827 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3828 info->has_unavailable_features = true;
3829 info->q_typename = g_strdup(object_class_get_name(oc));
3830 info->migration_safe = cc->migration_safe;
3831 info->has_migration_safe = true;
3832 info->q_static = cc->static_model;
3833
3834 entry = g_malloc0(sizeof(*entry));
3835 entry->value = info;
3836 entry->next = *cpu_list;
3837 *cpu_list = entry;
3838 }
3839
3840 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
3841 {
3842 CpuDefinitionInfoList *cpu_list = NULL;
3843 GSList *list = get_sorted_cpu_model_list();
3844 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3845 g_slist_free(list);
3846 return cpu_list;
3847 }
3848
/* Return the feature bits of feature word @w that the current
 * accelerator can actually support.
 *
 * KVM queries the kernel (CPUID- or MSR-based words), HVF queries the
 * hypervisor framework (CPUID words only), TCG uses the static
 * tcg_features mask, and any other accelerator is assumed to support
 * everything (~0).  With @migratable_only, bits that cannot be
 * migrated are masked out of the result.
 */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r = 0;

    if (kvm_enabled()) {
        switch (wi->type) {
        case CPUID_FEATURE_WORD:
            r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
                                                        wi->cpuid.ecx,
                                                        wi->cpuid.reg);
            break;
        case MSR_FEATURE_WORD:
            r = kvm_arch_get_supported_msr_feature(kvm_state,
                        wi->msr.index);
            break;
        }
    } else if (hvf_enabled()) {
        /* HVF has no MSR-based feature words; report none supported. */
        if (wi->type != CPUID_FEATURE_WORD) {
            return 0;
        }
        r = hvf_get_supported_cpuid(wi->cpuid.eax,
                                    wi->cpuid.ecx,
                                    wi->cpuid.reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        /* Unknown accelerator: claim full support (no filtering). */
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
3884
3885 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3886 {
3887 FeatureWord w;
3888
3889 for (w = 0; w < FEATURE_WORDS; w++) {
3890 report_unavailable_features(w, cpu->filtered_features[w]);
3891 }
3892 }
3893
3894 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3895 {
3896 PropValue *pv;
3897 for (pv = props; pv->prop; pv++) {
3898 if (!pv->value) {
3899 continue;
3900 }
3901 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3902 &error_abort);
3903 }
3904 }
3905
/* Load data from X86CPUDefinition into a X86CPU object.
 *
 * Copies levels, family/model/stepping, model-id, feature words and
 * cache info from @def into @cpu's QOM properties, then applies
 * accelerator-specific defaults.  Errors from the individual property
 * setters accumulate in @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    /* Feature words are copied directly into the env, not set as
     * properties. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        /* x2apic needs the in-kernel irqchip; disable it otherwise. */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Always advertise that we are running under a hypervisor. */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        /* With host-CPUID accelerators, expose the real host vendor. */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
3967
3968 #ifndef CONFIG_USER_ONLY
3969 /* Return a QDict containing keys for all properties that can be included
3970 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3971 * must be included in the dictionary.
3972 */
3973 static QDict *x86_cpu_static_props(void)
3974 {
3975 FeatureWord w;
3976 int i;
3977 static const char *props[] = {
3978 "min-level",
3979 "min-xlevel",
3980 "family",
3981 "model",
3982 "stepping",
3983 "model-id",
3984 "vendor",
3985 "lmce",
3986 NULL,
3987 };
3988 static QDict *d;
3989
3990 if (d) {
3991 return d;
3992 }
3993
3994 d = qdict_new();
3995 for (i = 0; props[i]; i++) {
3996 qdict_put_null(d, props[i]);
3997 }
3998
3999 for (w = 0; w < FEATURE_WORDS; w++) {
4000 FeatureWordInfo *fi = &feature_word_info[w];
4001 int bit;
4002 for (bit = 0; bit < 32; bit++) {
4003 if (!fi->feat_names[bit]) {
4004 continue;
4005 }
4006 qdict_put_null(d, fi->feat_names[bit]);
4007 }
4008 }
4009
4010 return d;
4011 }
4012
4013 /* Add an entry to @props dict, with the value for property. */
4014 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
4015 {
4016 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
4017 &error_abort);
4018
4019 qdict_put_obj(props, prop, value);
4020 }
4021
4022 /* Convert CPU model data from X86CPU object to a property dictionary
4023 * that can recreate exactly the same CPU model.
4024 */
4025 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
4026 {
4027 QDict *sprops = x86_cpu_static_props();
4028 const QDictEntry *e;
4029
4030 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
4031 const char *prop = qdict_entry_key(e);
4032 x86_cpu_expand_prop(cpu, props, prop);
4033 }
4034 }
4035
4036 /* Convert CPU model data from X86CPU object to a property dictionary
4037 * that can recreate exactly the same CPU model, including every
4038 * writeable QOM property.
4039 */
4040 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
4041 {
4042 ObjectPropertyIterator iter;
4043 ObjectProperty *prop;
4044
4045 object_property_iter_init(&iter, OBJECT(cpu));
4046 while ((prop = object_property_iter_next(&iter))) {
4047 /* skip read-only or write-only properties */
4048 if (!prop->get || !prop->set) {
4049 continue;
4050 }
4051
4052 /* "hotplugged" is the only property that is configurable
4053 * on the command-line but will be set differently on CPUs
4054 * created using "-cpu ... -smp ..." and by CPUs created
4055 * on the fly by x86_cpu_from_model() for querying. Skip it.
4056 */
4057 if (!strcmp(prop->name, "hotplugged")) {
4058 continue;
4059 }
4060 x86_cpu_expand_prop(cpu, props, prop->name);
4061 }
4062 }
4063
4064 static void object_apply_props(Object *obj, QDict *props, Error **errp)
4065 {
4066 const QDictEntry *prop;
4067 Error *err = NULL;
4068
4069 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
4070 object_property_set_qobject(obj, qdict_entry_value(prop),
4071 qdict_entry_key(prop), &err);
4072 if (err) {
4073 break;
4074 }
4075 }
4076
4077 error_propagate(errp, err);
4078 }
4079
/* Create X86CPU object according to model+props specification.
 *
 * Returns a new X86CPU (caller owns the reference) with @props applied
 * and features expanded, or NULL with @errp set on failure.
 */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

 out:
    /* goto-style cleanup: on any error, drop the object (if created)
     * and return NULL.
     * NOTE(review): when the model lookup fails, xc is still NULL here
     * and object_unref() receives NULL — assumes object_unref()
     * tolerates a NULL argument; confirm against qom/object.c. */
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}
4114
/* QMP query-cpu-model-expansion: expand a CPU model (name + optional
 * props) into an explicit property list.
 *
 * STATIC expansion reports only the static property set on top of the
 * "base" model; FULL expansion reports every writeable property on top
 * of the original model name.  Returns NULL with @errp set on error;
 * otherwise the caller owns the returned CpuModelExpansionInfo.
 */
CpuModelExpansionInfo *
qmp_query_cpu_model_expansion(CpuModelExpansionType type,
                                                      CpuModelInfo *model,
                                                      Error **errp)
{
    X86CPU *xc = NULL;
    Error *err = NULL;
    CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
    QDict *props = NULL;
    const char *base_name;

    /* Build a throw-away CPU from the requested model + props. */
    xc = x86_cpu_from_model(model->name,
                            model->has_props ?
                                qobject_to(QDict, model->props) :
                                NULL, &err);
    if (err) {
        goto out;
    }

    /* @props is owned by ret->model from here on and freed with it. */
    props = qdict_new();
    ret->model = g_new0(CpuModelInfo, 1);
    ret->model->props = QOBJECT(props);
    ret->model->has_props = true;

    switch (type) {
    case CPU_MODEL_EXPANSION_TYPE_STATIC:
        /* Static expansion will be based on "base" only */
        base_name = "base";
        x86_cpu_to_dict(xc, props);
        break;
    case CPU_MODEL_EXPANSION_TYPE_FULL:
        /* As we don't return every single property, full expansion needs
         * to keep the original model name+props, and add extra
         * properties on top of that.
         */
        base_name = model->name;
        x86_cpu_to_dict_full(xc, props);
        break;
    default:
        error_setg(&err, "Unsupported expansion type");
        goto out;
    }

    /* NOTE(review): this second x86_cpu_to_dict() call re-adds the
     * static property set after the switch above; for the STATIC case
     * it repeats the call inside the switch.  Looks redundant but
     * harmless (qdict_put_obj replaces entries) — confirm intent
     * before removing. */
    x86_cpu_to_dict(xc, props);

    ret->model->name = g_strdup(base_name);

 out:
    object_unref(OBJECT(xc));
    if (err) {
        error_propagate(errp, err);
        qapi_free_CpuModelExpansionInfo(ret);
        ret = NULL;
    }
    return ret;
}
4171 #endif /* !CONFIG_USER_ONLY */
4172
4173 static gchar *x86_gdb_arch_name(CPUState *cs)
4174 {
4175 #ifdef TARGET_X86_64
4176 return g_strdup("i386:x86-64");
4177 #else
4178 return g_strdup("i386");
4179 #endif
4180 }
4181
4182 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
4183 {
4184 X86CPUDefinition *cpudef = data;
4185 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4186
4187 xcc->cpu_def = cpudef;
4188 xcc->migration_safe = true;
4189 }
4190
4191 static void x86_register_cpudef_type(X86CPUDefinition *def)
4192 {
4193 char *typename = x86_cpu_type_name(def->name);
4194 TypeInfo ti = {
4195 .name = typename,
4196 .parent = TYPE_X86_CPU,
4197 .class_init = x86_cpu_cpudef_class_init,
4198 .class_data = def,
4199 };
4200
4201 /* AMD aliases are handled at runtime based on CPUID vendor, so
4202 * they shouldn't be set on the CPU model table.
4203 */
4204 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
4205 /* catch mistakes instead of silently truncating model_id when too long */
4206 assert(def->model_id && strlen(def->model_id) <= 48);
4207
4208
4209 type_register(&ti);
4210 g_free(typename);
4211 }
4212
4213 #if !defined(CONFIG_USER_ONLY)
4214
/* Clear the APIC bit from the guest-visible CPUID[1].EDX feature word,
 * for machines configured without a local APIC. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
4219
4220 #endif /* !CONFIG_USER_ONLY */
4221
/* Compute the guest-visible CPUID result for leaf @index, subleaf
 * @count, writing the four registers through @eax/@ebx/@ecx/@edx.
 *
 * Out-of-range leaves are clamped per range (basic / 0x40000000 /
 * 0x80000000 / 0xC0000000) and, following Intel behavior, an index
 * above the applicable limit is redirected to env->cpuid_level.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = env_archcpu(env);
    CPUState *cs = env_cpu(env);
    uint32_t pkg_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string and maximum basic leaf. */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, APIC ID, and the main feature words. */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE mirrors CR4.OSXSAVE at read time. */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) <<  8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26.  */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << pkg_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* MONITOR/MWAIT Leaf */
        *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
        *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
        *ecx = cpu->mwait.ecx; /* flags */
        *edx = cpu->mwait.edx; /* mwait substates */
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors CR4.PKE at read time. */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = xsave_area_size(env->xcr0);
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset sub-leaves. */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        /* Only exposed with the intel-pt feature and KVM. */
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Maximum extended leaf + vendor string again. */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf. */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if  (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        /* AMD cache topology sub-leaves, one per cache level. */
        *eax = 0;
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            break;
        }
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(cs, cpu,
                                  eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        /* SEV: enabled flag, C-bit position and reduced phys bits. */
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
4665
/* CPUClass::reset()
 *
 * Bring the CPU to its architectural RESET state: real mode,
 * CS=F000:FFF0, segment/descriptor defaults, FPU/SSE init values,
 * MSR defaults, cleared debug registers and MTRRs, and no pending
 * interrupt/exception/NMI injection state.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Fields past end_reset_fields survive reset (e.g. configuration). */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 reset value: CD/NW/ET/extension bits set, paging off. */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segment defaults; CS base F0000 with selector F000. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
    if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
        env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
    }

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_injected = -1;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Application processors start halted, waiting for SIPI. */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}
4797
4798 #ifndef CONFIG_USER_ONLY
4799 bool cpu_is_bsp(X86CPU *cpu)
4800 {
4801 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4802 }
4803
4804 /* TODO: remove me, when reset over QOM tree is implemented */
4805 static void x86_cpu_machine_reset_cb(void *opaque)
4806 {
4807 X86CPU *cpu = opaque;
4808 cpu_reset(CPU(cpu));
4809 }
4810 #endif
4811
4812 static void mce_init(X86CPU *cpu)
4813 {
4814 CPUX86State *cenv = &cpu->env;
4815 unsigned int bank;
4816
4817 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4818 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4819 (CPUID_MCE | CPUID_MCA)) {
4820 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4821 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4822 cenv->mcg_ctl = ~(uint64_t)0;
4823 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4824 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4825 }
4826 }
4827 }
4828
4829 #ifndef CONFIG_USER_ONLY
4830 APICCommonClass *apic_get_class(void)
4831 {
4832 const char *apic_type = "apic";
4833
4834 /* TODO: in-kernel irqchip for hvf */
4835 if (kvm_apic_in_kernel()) {
4836 apic_type = "kvm-apic";
4837 } else if (xen_enabled()) {
4838 apic_type = "xen-apic";
4839 }
4840
4841 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4842 }
4843
/* Create (but do not realize) the local APIC device for @cpu and attach
 * it as the "lapic" QOM child.  The APIC class is chosen per accelerator
 * by apic_get_class().
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* Drop the object_new() reference; the child property now owns it. */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    /* Reset value of IA32_APIC_BASE: default MMIO base with enable bit set. */
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
4861
/* Realize the CPU's local APIC (if one was created) and map the APIC MMIO
 * region into system memory exactly once for the whole machine.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    /* The MMIO window is shared by all CPUs; map it only for the first one. */
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    /* NOTE(review): errp is not checked after this call before continuing
     * with the MMIO mapping below — presumably mapping is harmless even if
     * realize failed; confirm against callers.
     */
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
4884
/* machine-init-done notifier: if the machine exposes /machine/smram,
 * alias its first 4GiB into this CPU's SMM address space with higher
 * priority than normal system memory.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        /* Priority 1 so SMRAM overrides cpu_as_mem (priority 0). */
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
4899 #else
/* User-mode emulation has no local APIC device; nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
4903 #endif
4904
/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax < 0x80000008) {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36, which matches most
         * older Intel parts.
         */
        return 36;
    }

    host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
    /* Note: According to AMD doc 25481 rev 2.34 there is a field at
     * bits 23:16 that can specify a maximum physical address bits for
     * the guest that can override this value; but nothing has been
     * seen with that set.
     */
    return eax & 0xff;
}
4930
4931 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4932 {
4933 if (*min < value) {
4934 *min = value;
4935 }
4936 }
4937
4938 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4939 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4940 {
4941 CPUX86State *env = &cpu->env;
4942 FeatureWordInfo *fi = &feature_word_info[w];
4943 uint32_t eax = fi->cpuid.eax;
4944 uint32_t region = eax & 0xF0000000;
4945
4946 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
4947 if (!env->features[w]) {
4948 return;
4949 }
4950
4951 switch (region) {
4952 case 0x00000000:
4953 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4954 break;
4955 case 0x80000000:
4956 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4957 break;
4958 case 0xC0000000:
4959 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4960 break;
4961 }
4962 }
4963
4964 /* Calculate XSAVE components based on the configured CPU feature flags */
4965 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4966 {
4967 CPUX86State *env = &cpu->env;
4968 int i;
4969 uint64_t mask;
4970
4971 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4972 return;
4973 }
4974
4975 mask = 0;
4976 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4977 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4978 if (env->features[esa->feature] & esa->bits) {
4979 mask |= (1ULL << i);
4980 }
4981 }
4982
4983 env->features[FEAT_XSAVE_COMP_LO] = mask;
4984 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4985 }
4986
4987 /***** Steps involved on loading and filtering CPUID data
4988 *
4989 * When initializing and realizing a CPU object, the steps
4990 * involved in setting up CPUID data are:
4991 *
4992 * 1) Loading CPU model definition (X86CPUDefinition). This is
4993 * implemented by x86_cpu_load_def() and should be completely
4994 * transparent, as it is done automatically by instance_init.
4995 * No code should need to look at X86CPUDefinition structs
4996 * outside instance_init.
4997 *
4998 * 2) CPU expansion. This is done by realize before CPUID
4999 * filtering, and will make sure host/accelerator data is
5000 * loaded for CPU models that depend on host capabilities
5001 * (e.g. "host"). Done by x86_cpu_expand_features().
5002 *
5003 * 3) CPUID filtering. This initializes extra data related to
5004 * CPUID, and checks if the host supports all capabilities
5005 * required by the CPU. Runnability of a CPU model is
5006 * determined at this step. Done by x86_cpu_filter_features().
5007 *
5008 * Some operations don't require all steps to be performed.
5009 * More precisely:
5010 *
5011 * - CPU instance creation (instance_init) will run only CPU
5012 * model loading. CPU expansion can't run at instance_init-time
5013 * because host/accelerator data may be not available yet.
5014 * - CPU realization will perform both CPU model expansion and CPUID
5015 * filtering, and return an error in case one of them fails.
5016 * - query-cpu-definitions needs to run all 3 steps. It needs
5017 * to run CPUID filtering, as the 'unavailable-features'
5018 * field is set based on the filtering results.
5019 * - The query-cpu-model-expansion QMP command only needs to run
5020 * CPU model loading and CPU expansion. It should not filter
5021 * any CPUID data based on host capabilities.
5022 */
5023
5024 /* Expand CPU configuration data, based on configured features
5025 * and host/accelerator capabilities when appropriate.
5026 */
5027 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
5028 {
5029 CPUX86State *env = &cpu->env;
5030 FeatureWord w;
5031 GList *l;
5032 Error *local_err = NULL;
5033
5034 /*TODO: Now cpu->max_features doesn't overwrite features
5035 * set using QOM properties, and we can convert
5036 * plus_features & minus_features to global properties
5037 * inside x86_cpu_parse_featurestr() too.
5038 */
5039 if (cpu->max_features) {
5040 for (w = 0; w < FEATURE_WORDS; w++) {
5041 /* Override only features that weren't set explicitly
5042 * by the user.
5043 */
5044 env->features[w] |=
5045 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
5046 ~env->user_features[w] & \
5047 ~feature_word_info[w].no_autoenable_flags;
5048 }
5049 }
5050
5051 for (l = plus_features; l; l = l->next) {
5052 const char *prop = l->data;
5053 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
5054 if (local_err) {
5055 goto out;
5056 }
5057 }
5058
5059 for (l = minus_features; l; l = l->next) {
5060 const char *prop = l->data;
5061 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
5062 if (local_err) {
5063 goto out;
5064 }
5065 }
5066
5067 if (!kvm_enabled() || !cpu->expose_kvm) {
5068 env->features[FEAT_KVM] = 0;
5069 }
5070
5071 x86_cpu_enable_xsave_components(cpu);
5072
5073 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
5074 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
5075 if (cpu->full_cpuid_auto_level) {
5076 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
5077 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
5078 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
5079 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
5080 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
5081 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
5082 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
5083 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
5084 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
5085 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
5086 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
5087
5088 /* Intel Processor Trace requires CPUID[0x14] */
5089 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
5090 kvm_enabled() && cpu->intel_pt_auto_level) {
5091 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
5092 }
5093
5094 /* SVM requires CPUID[0x8000000A] */
5095 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
5096 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
5097 }
5098
5099 /* SEV requires CPUID[0x8000001F] */
5100 if (sev_enabled()) {
5101 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
5102 }
5103 }
5104
5105 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
5106 if (env->cpuid_level == UINT32_MAX) {
5107 env->cpuid_level = env->cpuid_min_level;
5108 }
5109 if (env->cpuid_xlevel == UINT32_MAX) {
5110 env->cpuid_xlevel = env->cpuid_min_xlevel;
5111 }
5112 if (env->cpuid_xlevel2 == UINT32_MAX) {
5113 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
5114 }
5115
5116 out:
5117 if (local_err != NULL) {
5118 error_propagate(errp, local_err);
5119 }
5120 }
5121
5122 /*
5123 * Finishes initialization of CPUID data, filters CPU feature
5124 * words based on host availability of each feature.
5125 *
5126 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
5127 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    /* Mask every feature word down to what the accelerator supports,
     * recording the dropped bits in cpu->filtered_features.
     */
    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    /* Intel PT is all-or-nothing: verify the host's CPUID[0x14] leaves
     * cover everything cpu_x86_cpuid() will advertise to the guest.
     */
    if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
        kvm_enabled()) {
        KVMState *s = CPU(cpu)->kvm_state;
        uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
        uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
        uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
        uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
        uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);

        if (!eax_0 ||
           ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
           ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
           ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
           ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
                                           INTEL_PT_ADDR_RANGES_NUM) ||
           ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
                (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
           (ecx_0 & INTEL_PT_IP_LIP)) {
            /*
             * Processor Trace capabilities aren't configurable, so if the
             * host can't emulate the capabilities we report on
             * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
             */
            env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
            cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
            rv = 1;
        }
    }

    return rv;
}
5176
/* Vendor checks: all three CPUID[0].EBX/EDX/ECX vendor words must match. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* Realize an X86CPU: expand and filter CPUID features, settle phys-bits
 * and cache info, create the APIC, set up TCG address spaces, and start
 * the vCPU.  The ordering of these stages is significant.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    /* "host"-like models only work when the accelerator exposes host CPUID. */
    if (xcc->host_cpuid_required) {
        if (!accel_uses_host_cpuid()) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(&local_err, "CPU model '%s' requires KVM", name);
            g_free(name);
            goto out;
        }

        if (enable_cpu_pm) {
            host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
                       &cpu->mwait.ecx, &cpu->mwait.edx);
            env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
        }
    }

    /* mwait extended info: needed for Core compatibility */
    /* We always wake on interrupt even if host does not have the capability */
    cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Filtering must follow expansion; report/fail per check/enforce. */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       accel_uses_host_cpuid() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
            & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
                if (cpu->host_phys_bits_limit &&
                    cpu->phys_bits > cpu->host_phys_bits_limit) {
                    cpu->phys_bits = cpu->host_phys_bits_limit;
                }
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                 cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            g_free(name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->cpu_def->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }


    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed with an APIC feature flag or with SMP guests. */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /*
     * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
     * fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (IS_AMD_CPU(env) &&
        !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
        cs->nr_threads > 1 && !ht_warned) {
        warn_report("This family of AMD CPU doesn't support "
                    "hyperthreading(%d)",
                    cs->nr_threads);
        error_printf("Please configure -smp options properly"
                     " or try enabling topoext feature.\n");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
5423
/* Unrealize an X86CPU: stop the vCPU, drop the reset hook, destroy the
 * APIC child, then chain to the parent class unrealize.
 */
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    /* dev is the same object registered as "cpu" in x86_cpu_realizefn(). */
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
5446
/* Opaque state for a boolean CPU feature property: getting/setting the
 * property reads/writes the bits in @mask of feature word @w.
 */
typedef struct BitProperty {
    FeatureWord w;
    uint32_t mask;
} BitProperty;
5451
5452 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5453 void *opaque, Error **errp)
5454 {
5455 X86CPU *cpu = X86_CPU(obj);
5456 BitProperty *fp = opaque;
5457 uint32_t f = cpu->env.features[fp->w];
5458 bool value = (f & fp->mask) == fp->mask;
5459 visit_type_bool(v, name, &value, errp);
5460 }
5461
/* QOM setter for a feature-bit property: sets or clears all bits in the
 * property's mask, and records them as explicitly user-set so automatic
 * feature expansion won't override them.
 */
static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    /* Feature words are fixed once the CPU has been realized. */
    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
        cpu->env.features[fp->w] |= fp->mask;
    } else {
        cpu->env.features[fp->w] &= ~fp->mask;
    }
    /* Mark these bits as user-chosen regardless of the value set. */
    cpu->env.user_features[fp->w] |= fp->mask;
}
5489
5490 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5491 void *opaque)
5492 {
5493 BitProperty *prop = opaque;
5494 g_free(prop);
5495 }
5496
5497 /* Register a boolean property to get/set a single bit in a uint32_t field.
5498 *
5499 * The same property name can be registered multiple times to make it affect
5500 * multiple bits in the same FeatureWord. In that case, the getter will return
5501 * true only if all bits are set.
5502 */
5503 static void x86_cpu_register_bit_prop(X86CPU *cpu,
5504 const char *prop_name,
5505 FeatureWord w,
5506 int bitnr)
5507 {
5508 BitProperty *fp;
5509 ObjectProperty *op;
5510 uint32_t mask = (1UL << bitnr);
5511
5512 op = object_property_find(OBJECT(cpu), prop_name, NULL);
5513 if (op) {
5514 fp = op->opaque;
5515 assert(fp->w == w);
5516 fp->mask |= mask;
5517 } else {
5518 fp = g_new0(BitProperty, 1);
5519 fp->w = w;
5520 fp->mask = mask;
5521 object_property_add(OBJECT(cpu), prop_name, "bool",
5522 x86_cpu_get_bit_prop,
5523 x86_cpu_set_bit_prop,
5524 x86_cpu_release_bit_prop, fp, &error_abort);
5525 }
5526 }
5527
5528 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5529 FeatureWord w,
5530 int bitnr)
5531 {
5532 FeatureWordInfo *fi = &feature_word_info[w];
5533 const char *name = fi->feat_names[bitnr];
5534
5535 if (!name) {
5536 return;
5537 }
5538
5539 /* Property names should use "-" instead of "_".
5540 * Old names containing underscores are registered as aliases
5541 * using object_property_add_alias()
5542 */
5543 assert(!strchr(name, '_'));
5544 /* aliases don't use "|" delimiters anymore, they are registered
5545 * manually using object_property_add_alias() */
5546 assert(!strchr(name, '|'));
5547 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5548 }
5549
/* Build a GuestPanicInformation record from the Hyper-V crash MSRs, or
 * return NULL when the guest-crash MSR feature is not exposed.
 * Caller owns the returned allocation.
 */
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        /* The five arg fields below require at least five crash params. */
        assert(HV_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
        panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}
5571 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5572 const char *name, void *opaque,
5573 Error **errp)
5574 {
5575 CPUState *cs = CPU(obj);
5576 GuestPanicInformation *panic_info;
5577
5578 if (!cs->crash_occurred) {
5579 error_setg(errp, "No crash occured");
5580 return;
5581 }
5582
5583 panic_info = x86_cpu_get_crash_info(cs);
5584 if (panic_info == NULL) {
5585 error_setg(errp, "No crash information");
5586 return;
5587 }
5588
5589 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5590 errp);
5591 qapi_free_GuestPanicInformation(panic_info);
5592 }
5593
/* instance_init for X86CPU: registers the per-CPU QOM properties, the
 * per-bit feature properties and their legacy-name aliases, then loads
 * the class's CPU model definition (step 1 of CPUID setup).
 */
static void x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cpu_set_cpustate_pointers(cpu);

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Aliases must be registered after the canonical names above. */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Legacy underscore spellings of dash-named properties. */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
5675
5676 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5677 {
5678 X86CPU *cpu = X86_CPU(cs);
5679
5680 return cpu->apic_id;
5681 }
5682
5683 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5684 {
5685 X86CPU *cpu = X86_CPU(cs);
5686
5687 return cpu->env.cr[0] & CR0_PG_MASK;
5688 }
5689
5690 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5691 {
5692 X86CPU *cpu = X86_CPU(cs);
5693
5694 cpu->env.eip = value;
5695 }
5696
5697 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5698 {
5699 X86CPU *cpu = X86_CPU(cs);
5700
5701 cpu->env.eip = tb->pc - tb->cs_base;
5702 }
5703
/* Return the single highest-priority deliverable interrupt bit from
 * @interrupt_request, or 0 if nothing can be taken now.  The cascade
 * below encodes the architectural priority order (POLL/SIPI before
 * SMI, NMI, MCE, then maskable and virtual interrupts gated by GIF,
 * EFLAGS.IF and the inhibit-IRQ shadow).
 */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    /* With GIF clear (SVM), everything below is held off. */
    if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            return CPU_INTERRUPT_SMI;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}
5745
5746 static bool x86_cpu_has_work(CPUState *cs)
5747 {
5748 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
5749 }
5750
5751 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5752 {
5753 X86CPU *cpu = X86_CPU(cs);
5754 CPUX86State *env = &cpu->env;
5755
5756 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5757 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5758 : bfd_mach_i386_i8086);
5759 info->print_insn = print_insn_i386;
5760
5761 info->cap_arch = CS_ARCH_X86;
5762 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5763 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5764 : CS_MODE_16);
5765 info->cap_insn_unit = 1;
5766 info->cap_insn_split = 8;
5767 }
5768
/*
 * Recompute the derived bits of env->hflags from the architectural state
 * (segment registers, CR0/CR4, EFLAGS and EFER), preserving every hflags
 * bit that is not derived here.
 */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
/* Mask of the hflags bits that are NOT recomputed by this function. */
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    /* CPL is taken from the DPL field of the current stack segment. */
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    /* CR0.MP/EM/TS map onto adjacent hflags bits with a single shift. */
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    /* TF/VM/IOPL occupy the same bit positions in EFLAGS and hflags. */
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        /* 64-bit code segment: LMA set and CS.L set. */
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        /* CS.D and SS.B give the default operand and stack sizes. */
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            /* Real mode, vm86 mode or 16-bit code: always add seg bases. */
            hflags |= HF_ADDSEG_MASK;
        } else {
            /* Segment bases may be skipped only when DS/ES/SS are flat. */
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}
5810
/* qdev properties common to all X86CPU objects. */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* For system emulation, topology IDs are unset until assigned. */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenment properties. */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
    DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
    DEFINE_PROP_BOOL("hv-tlbflush", X86CPU, hyperv_tlbflush, false),
    DEFINE_PROP_BOOL("hv-evmcs", X86CPU, hyperv_evmcs, false),
    DEFINE_PROP_BOOL("hv-ipi", X86CPU, hyperv_ipi, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX means "use the model's / auto-computed level". */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
                     true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
                     false),
    DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
                     true),
    DEFINE_PROP_END_OF_LIST()
};
5890
/*
 * Class initializer shared by every X86CPU subtype: wires up the qdev
 * realize/unrealize chain, the CPUClass hooks and the gdbstub layout.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    /* Save the parent's reset handler before overriding it. */
    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifndef CONFIG_USER_ONLY
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
    /* gdbstub register layout differs between 64-bit and 32-bit targets. */
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 66;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 50;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
    cc->tlb_fill = x86_cpu_tlb_fill;
#endif
    cc->disas_set_info = x86_disas_set_info;

    dc->user_creatable = true;
}
5953
/* Abstract base QOM type for all x86 CPU models. */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true, /* only concrete model subtypes are instantiable */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
5963
5964
5965 /* "base" CPU model, used by query-cpu-model-expansion */
5966 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
5967 {
5968 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5969
5970 xcc->static_model = true;
5971 xcc->migration_safe = true;
5972 xcc->model_description = "base CPU model type with no features enabled";
5973 xcc->ordering = 8;
5974 }
5975
/* QOM type registration for the "base" CPU model. */
static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};
5981
5982 static void x86_cpu_register_types(void)
5983 {
5984 int i;
5985
5986 type_register_static(&x86_cpu_type_info);
5987 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5988 x86_register_cpudef_type(&builtin_x86_defs[i]);
5989 }
5990 type_register_static(&max_x86_cpu_type_info);
5991 type_register_static(&x86_base_cpu_type_info);
5992 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5993 type_register_static(&host_x86_cpu_type_info);
5994 #endif
5995 }
5996
5997 type_init(x86_cpu_register_types)