1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24 #include "qemu/qemu-print.h"
25
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/hvf.h"
30 #include "sysemu/cpus.h"
31 #include "kvm_i386.h"
32 #include "sev_i386.h"
33
34 #include "qemu/error-report.h"
35 #include "qemu/module.h"
36 #include "qemu/option.h"
37 #include "qemu/config-file.h"
38 #include "qapi/error.h"
39 #include "qapi/qapi-visit-machine.h"
40 #include "qapi/qapi-visit-run-state.h"
41 #include "qapi/qmp/qdict.h"
42 #include "qapi/qmp/qerror.h"
43 #include "qapi/visitor.h"
44 #include "qom/qom-qobject.h"
45 #include "sysemu/arch_init.h"
46 #include "qapi/qapi-commands-machine-target.h"
47
48 #include "standard-headers/asm-x86/kvm_para.h"
49
50 #include "sysemu/sysemu.h"
51 #include "sysemu/tcg.h"
52 #include "hw/qdev-properties.h"
53 #include "hw/i386/topology.h"
54 #ifndef CONFIG_USER_ONLY
55 #include "exec/address-spaces.h"
56 #include "hw/hw.h"
57 #include "hw/xen/xen.h"
58 #include "hw/i386/apic_internal.h"
59 #include "hw/boards.h"
60 #endif
61
62 #include "disas/capstone.h"
63
64 /* Helpers for building CPUID[2] descriptors: */
65
66 struct CPUID2CacheDescriptorInfo {
67 enum CacheType type;
68 int level;
69 int size;
70 int line_size;
71 int associativity;
72 };
73
74 /*
75 * Known CPUID 2 cache descriptors.
76 * From Intel SDM Volume 2A, CPUID instruction
77 */
78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
80 .associativity = 4, .line_size = 32, },
81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
82 .associativity = 4, .line_size = 32, },
83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
84 .associativity = 4, .line_size = 64, },
85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
86 .associativity = 2, .line_size = 32, },
87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
88 .associativity = 4, .line_size = 32, },
89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
90 .associativity = 4, .line_size = 64, },
91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
92 .associativity = 6, .line_size = 64, },
93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
94 .associativity = 2, .line_size = 64, },
95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
96 .associativity = 8, .line_size = 64, },
97         /* lines per sector is not supported by cpuid2_cache_descriptor(),
98          * so descriptors 0x22, 0x23 are not included
99          */
100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
101 .associativity = 16, .line_size = 64, },
102         /* lines per sector is not supported by cpuid2_cache_descriptor(),
103          * so descriptors 0x25, 0x20 are not included
104          */
105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
106 .associativity = 8, .line_size = 64, },
107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
108 .associativity = 8, .line_size = 64, },
109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
112 .associativity = 4, .line_size = 32, },
113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
114 .associativity = 4, .line_size = 32, },
115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
116 .associativity = 4, .line_size = 32, },
117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
118 .associativity = 4, .line_size = 32, },
119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
120 .associativity = 4, .line_size = 64, },
121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
122 .associativity = 8, .line_size = 64, },
123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
124 .associativity = 12, .line_size = 64, },
125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
131 .associativity = 12, .line_size = 64, },
132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
133 .associativity = 16, .line_size = 64, },
134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
135 .associativity = 24, .line_size = 64, },
136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
137 .associativity = 8, .line_size = 64, },
138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
141 .associativity = 4, .line_size = 64, },
142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
143 .associativity = 4, .line_size = 64, },
144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
145 .associativity = 4, .line_size = 64, },
146         /* lines per sector is not supported by cpuid2_cache_descriptor(),
147          * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
148          */
149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
150 .associativity = 8, .line_size = 64, },
151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
152 .associativity = 2, .line_size = 64, },
153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 64, },
155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
156 .associativity = 8, .line_size = 32, },
157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
158 .associativity = 8, .line_size = 32, },
159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
160 .associativity = 8, .line_size = 32, },
161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
162 .associativity = 8, .line_size = 32, },
163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 8, .line_size = 64, },
167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 4, .line_size = 64, },
171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 4, .line_size = 64, },
173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
176 .associativity = 8, .line_size = 64, },
177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
178 .associativity = 8, .line_size = 64, },
179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
182 .associativity = 12, .line_size = 64, },
183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
184 .associativity = 12, .line_size = 64, },
185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
188 .associativity = 16, .line_size = 64, },
189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
190 .associativity = 16, .line_size = 64, },
191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
192 .associativity = 24, .line_size = 64, },
193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
194 .associativity = 24, .line_size = 64, },
195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
196 .associativity = 24, .line_size = 64, },
197 };
198
199 /*
200 * "CPUID leaf 2 does not report cache descriptor information,
201 * use CPUID leaf 4 to query cache parameters"
202 */
203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
204
205 /*
206 * Return a CPUID 2 cache descriptor for a given cache.
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
208 */
209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
210 {
211 int i;
212
213 assert(cache->size > 0);
214 assert(cache->level > 0);
215 assert(cache->line_size > 0);
216 assert(cache->associativity > 0);
217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
219 if (d->level == cache->level && d->type == cache->type &&
220 d->size == cache->size && d->line_size == cache->line_size &&
221 d->associativity == cache->associativity) {
222 return i;
223 }
224 }
225
226 return CACHE_DESCRIPTOR_UNAVAILABLE;
227 }
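/*
 * Worked example (illustrative, not used by the code): a unified level-2
 * cache of 2 MiB with 8-way associativity and 64-byte lines -- the
 * legacy_l2_cache_cpuid2 entry defined below -- matches table entry 0x7D,
 * so cpuid2_cache_descriptor() returns 0x7D for it.
 */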
228
229 /* CPUID Leaf 4 constants: */
230
231 /* EAX: */
232 #define CACHE_TYPE_D 1
233 #define CACHE_TYPE_I 2
234 #define CACHE_TYPE_UNIFIED 3
235
236 #define CACHE_LEVEL(l) (l << 5)
237
238 #define CACHE_SELF_INIT_LEVEL (1 << 8)
239
240 /* EDX: */
241 #define CACHE_NO_INVD_SHARING (1 << 0)
242 #define CACHE_INCLUSIVE (1 << 1)
243 #define CACHE_COMPLEX_IDX (1 << 2)
244
245 /* Encode CacheType for CPUID[4].EAX */
246 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
247 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
248 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
249 0 /* Invalid value */)
250
251
252 /* Encode cache info for CPUID[4] */
253 static void encode_cache_cpuid4(CPUCacheInfo *cache,
254 int num_apic_ids, int num_cores,
255 uint32_t *eax, uint32_t *ebx,
256 uint32_t *ecx, uint32_t *edx)
257 {
258 assert(cache->size == cache->line_size * cache->associativity *
259 cache->partitions * cache->sets);
260
261 assert(num_apic_ids > 0);
262 *eax = CACHE_TYPE(cache->type) |
263 CACHE_LEVEL(cache->level) |
264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
265 ((num_cores - 1) << 26) |
266 ((num_apic_ids - 1) << 14);
267
268 assert(cache->line_size > 0);
269 assert(cache->partitions > 0);
270 assert(cache->associativity > 0);
271 /* We don't implement fully-associative caches */
272 assert(cache->associativity < cache->sets);
273 *ebx = (cache->line_size - 1) |
274 ((cache->partitions - 1) << 12) |
275 ((cache->associativity - 1) << 22);
276
277 assert(cache->sets > 0);
278 *ecx = cache->sets - 1;
279
280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
281 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
283 }
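/*
 * Worked example (illustrative, assuming num_apic_ids = 1 and num_cores = 1):
 * for the legacy_l1d_cache entry defined below (32 KiB data cache, level 1,
 * self-initializing, 8-way, 64-byte lines, 1 partition, 64 sets) this yields
 * EAX = 0x121, EBX = (64 - 1) | ((1 - 1) << 12) | ((8 - 1) << 22) = 0x01C0003F
 * and ECX = 64 - 1 = 0x3F.
 */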
284
285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
287 {
288 assert(cache->size % 1024 == 0);
289 assert(cache->lines_per_tag > 0);
290 assert(cache->associativity > 0);
291 assert(cache->line_size > 0);
292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
293 (cache->lines_per_tag << 8) | (cache->line_size);
294 }
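/*
 * Worked example (illustrative): for the legacy_l1d_cache_amd entry defined
 * below (64 KiB, 2-way, 1 line per tag, 64-byte lines) this returns
 * (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140.
 */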
295
296 #define ASSOC_FULL 0xFF
297
298 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
299 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
300 a == 2 ? 0x2 : \
301 a == 4 ? 0x4 : \
302 a == 8 ? 0x6 : \
303 a == 16 ? 0x8 : \
304 a == 32 ? 0xA : \
305 a == 48 ? 0xB : \
306 a == 64 ? 0xC : \
307 a == 96 ? 0xD : \
308 a == 128 ? 0xE : \
309 a == ASSOC_FULL ? 0xF : \
310 0 /* invalid value */)
311
312 /*
313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
314 * @l3 can be NULL.
315 */
316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
317 CPUCacheInfo *l3,
318 uint32_t *ecx, uint32_t *edx)
319 {
320 assert(l2->size % 1024 == 0);
321 assert(l2->associativity > 0);
322 assert(l2->lines_per_tag > 0);
323 assert(l2->line_size > 0);
324 *ecx = ((l2->size / 1024) << 16) |
325 (AMD_ENC_ASSOC(l2->associativity) << 12) |
326 (l2->lines_per_tag << 8) | (l2->line_size);
327
328 if (l3) {
329 assert(l3->size % (512 * 1024) == 0);
330 assert(l3->associativity > 0);
331 assert(l3->lines_per_tag > 0);
332 assert(l3->line_size > 0);
333 *edx = ((l3->size / (512 * 1024)) << 18) |
334 (AMD_ENC_ASSOC(l3->associativity) << 12) |
335 (l3->lines_per_tag << 8) | (l3->line_size);
336 } else {
337 *edx = 0;
338 }
339 }
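/*
 * Worked example (illustrative): for the legacy_l2_cache_amd entry defined
 * below (512 KiB, 16-way, 1 line per tag, 64-byte lines), AMD_ENC_ASSOC(16)
 * is 0x8, so ECX = (512 << 16) | (0x8 << 12) | (1 << 8) | 64 = 0x02008140;
 * with no L3 (l3 == NULL), EDX is 0.
 */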
340
341 /*
342  * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E.
343  * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
344  * Define the constants used to build the CPU topology. Right now, the
345  * TOPOEXT feature is enabled only on EPYC, so these constants are based
346  * on EPYC-supported configurations. We may need to handle other cases
347  * if these values change in the future.
348  */
349 /* Maximum core complexes in a node */
350 #define MAX_CCX 2
351 /* Maximum cores in a core complex */
352 #define MAX_CORES_IN_CCX 4
353 /* Maximum cores in a node */
354 #define MAX_CORES_IN_NODE 8
355 /* Maximum nodes in a socket */
356 #define MAX_NODES_PER_SOCKET 4
357
358 /*
359  * Figure out the number of nodes required to build this config.
360  * The maximum number of cores in a node is 8 (MAX_CORES_IN_NODE).
361  */
362 static int nodes_in_socket(int nr_cores)
363 {
364 int nodes;
365
366 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
367
368 /* Hardware does not support config with 3 nodes, return 4 in that case */
369 return (nodes == 3) ? 4 : nodes;
370 }
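/*
 * E.g. nr_cores = 12 gives DIV_ROUND_UP(12, 8) = 2 nodes, while any value
 * in the 17..24 range would give 3 nodes and is therefore bumped to 4.
 */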
371
372 /*
373  * Decide the number of cores in a core complex for the given nr_cores, using
374  * the constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
375  * MAX_NODES_PER_SOCKET, and maintaining symmetry as much as possible.
376  * The L3 cache is shared across all cores in a core complex, so this also
377  * tells us how many cores are sharing the L3 cache.
378  */
379 static int cores_in_core_complex(int nr_cores)
380 {
381 int nodes;
382
383 /* Check if we can fit all the cores in one core complex */
384 if (nr_cores <= MAX_CORES_IN_CCX) {
385 return nr_cores;
386 }
387 /* Get the number of nodes required to build this config */
388 nodes = nodes_in_socket(nr_cores);
389
390     /*
391      * Divide the cores across all the core complexes and
392      * return the rounded-up value.
393      */
394 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
395 }
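/*
 * E.g. nr_cores = 16 needs 2 nodes, so DIV_ROUND_UP(16, 2 * MAX_CCX) = 4
 * cores per core complex; nr_cores = 6 needs 1 node, giving
 * DIV_ROUND_UP(6, 1 * MAX_CCX) = 3 cores per core complex.
 */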
396
397 /* Encode cache info for CPUID[8000001D] */
398 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
399 uint32_t *eax, uint32_t *ebx,
400 uint32_t *ecx, uint32_t *edx)
401 {
402 uint32_t l3_cores;
403 assert(cache->size == cache->line_size * cache->associativity *
404 cache->partitions * cache->sets);
405
406 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
407 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
408
409 /* L3 is shared among multiple cores */
410 if (cache->level == 3) {
411 l3_cores = cores_in_core_complex(cs->nr_cores);
412 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
413 } else {
414 *eax |= ((cs->nr_threads - 1) << 14);
415 }
416
417 assert(cache->line_size > 0);
418 assert(cache->partitions > 0);
419 assert(cache->associativity > 0);
420 /* We don't implement fully-associative caches */
421 assert(cache->associativity < cache->sets);
422 *ebx = (cache->line_size - 1) |
423 ((cache->partitions - 1) << 12) |
424 ((cache->associativity - 1) << 22);
425
426 assert(cache->sets > 0);
427 *ecx = cache->sets - 1;
428
429 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
430 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
431 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
432 }
433
434 /* Data structure to hold the configuration info for a given core index */
435 struct core_topology {
436 /* core complex id of the current core index */
437 int ccx_id;
438 /*
439      * Adjusted core index for this core in the topology.
440      * This can be 0, 1, 2 or 3, with at most 4 cores in a core complex.
441      */
442 int core_id;
443 /* Node id for this core index */
444 int node_id;
445 /* Number of nodes in this config */
446 int num_nodes;
447 };
448
449 /*
450  * Build a configuration that closely matches the EPYC hardware, using the
451  * EPYC hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX,
452  * MAX_CORES_IN_NODE) for now. This could change in the future.
453  * nr_cores : Total number of cores in the config
454  * core_id : Core index of the current CPU
455  * topo : Data structure to hold all the config info for this core index
456  */
457 static void build_core_topology(int nr_cores, int core_id,
458 struct core_topology *topo)
459 {
460 int nodes, cores_in_ccx;
461
462 /* First get the number of nodes required */
463 nodes = nodes_in_socket(nr_cores);
464
465 cores_in_ccx = cores_in_core_complex(nr_cores);
466
467 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
468 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
469 topo->core_id = core_id % cores_in_ccx;
470 topo->num_nodes = nodes;
471 }
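/*
 * E.g. nr_cores = 16 and core_id = 11: cores_in_ccx = 4, so
 * node_id = 11 / 8 = 1, ccx_id = (11 % 8) / 4 = 0, core_id = 11 % 4 = 3
 * and num_nodes = 2.
 */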
472
473 /* Encode topology info for CPUID[8000001E] */
474 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
475 uint32_t *eax, uint32_t *ebx,
476 uint32_t *ecx, uint32_t *edx)
477 {
478 struct core_topology topo = {0};
479 unsigned long nodes;
480 int shift;
481
482 build_core_topology(cs->nr_cores, cpu->core_id, &topo);
483 *eax = cpu->apic_id;
484 /*
485 * CPUID_Fn8000001E_EBX
486 * 31:16 Reserved
487 * 15:8 Threads per core (The number of threads per core is
488 * Threads per core + 1)
489 * 7:0 Core id (see bit decoding below)
490 * SMT:
491 * 4:3 node id
492 * 2 Core complex id
493 * 1:0 Core id
494 * Non SMT:
495 * 5:4 node id
496 * 3 Core complex id
497 * 1:0 Core id
498 */
499 if (cs->nr_threads - 1) {
500 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
501 (topo.ccx_id << 2) | topo.core_id;
502 } else {
503 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
504 }
505 /*
506 * CPUID_Fn8000001E_ECX
507 * 31:11 Reserved
508 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
509 * 7:0 Node id (see bit decoding below)
510 * 2 Socket id
511 * 1:0 Node id
512 */
513 if (topo.num_nodes <= 4) {
514 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
515 topo.node_id;
516 } else {
517         /*
518          * Node id fixup. Actual hardware supports up to 4 nodes, but with
519          * more than 32 cores we may end up with more than 4 nodes.
520          * The node id is a combination of socket id and node id. The only
521          * requirement here is that this number should be unique across the system.
522          * Shift the socket id to accommodate more nodes. We don't expect both
523          * socket id and node id to be big numbers at the same time. This is not
524          * an ideal config, but we need to support it. The maximum number of nodes
525          * we can have is 32 (255/8) with 8 cores per node and 255 max cores. We
526          * only need 5 bits for nodes. Find the leftmost set bit to represent the
527          * total number of nodes. find_last_bit returns the last set bit (0-based).
528          * Left-shift (+1) the socket id to make room for all the nodes.
529          */
530 nodes = topo.num_nodes - 1;
531 shift = find_last_bit(&nodes, 8);
532 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
533 topo.node_id;
534 }
535 *edx = 0;
536 }
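/*
 * E.g. with num_nodes = 2, socket_id = 0 and node_id = 1, the <= 4 node
 * branch above yields ECX = ((2 - 1) << 8) | (0 << 2) | 1 = 0x101.
 */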
537
538 /*
539 * Definitions of the hardcoded cache entries we expose:
540 * These are legacy cache values. If there is a need to change any
541  * of these values, please use builtin_x86_defs.
542 */
543
544 /* L1 data cache: */
545 static CPUCacheInfo legacy_l1d_cache = {
546 .type = DATA_CACHE,
547 .level = 1,
548 .size = 32 * KiB,
549 .self_init = 1,
550 .line_size = 64,
551 .associativity = 8,
552 .sets = 64,
553 .partitions = 1,
554 .no_invd_sharing = true,
555 };
556
557 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
558 static CPUCacheInfo legacy_l1d_cache_amd = {
559 .type = DATA_CACHE,
560 .level = 1,
561 .size = 64 * KiB,
562 .self_init = 1,
563 .line_size = 64,
564 .associativity = 2,
565 .sets = 512,
566 .partitions = 1,
567 .lines_per_tag = 1,
568 .no_invd_sharing = true,
569 };
570
571 /* L1 instruction cache: */
572 static CPUCacheInfo legacy_l1i_cache = {
573 .type = INSTRUCTION_CACHE,
574 .level = 1,
575 .size = 32 * KiB,
576 .self_init = 1,
577 .line_size = 64,
578 .associativity = 8,
579 .sets = 64,
580 .partitions = 1,
581 .no_invd_sharing = true,
582 };
583
584 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
585 static CPUCacheInfo legacy_l1i_cache_amd = {
586 .type = INSTRUCTION_CACHE,
587 .level = 1,
588 .size = 64 * KiB,
589 .self_init = 1,
590 .line_size = 64,
591 .associativity = 2,
592 .sets = 512,
593 .partitions = 1,
594 .lines_per_tag = 1,
595 .no_invd_sharing = true,
596 };
597
598 /* Level 2 unified cache: */
599 static CPUCacheInfo legacy_l2_cache = {
600 .type = UNIFIED_CACHE,
601 .level = 2,
602 .size = 4 * MiB,
603 .self_init = 1,
604 .line_size = 64,
605 .associativity = 16,
606 .sets = 4096,
607 .partitions = 1,
608 .no_invd_sharing = true,
609 };
610
611 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
612 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
613 .type = UNIFIED_CACHE,
614 .level = 2,
615 .size = 2 * MiB,
616 .line_size = 64,
617 .associativity = 8,
618 };
619
620
621 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
622 static CPUCacheInfo legacy_l2_cache_amd = {
623 .type = UNIFIED_CACHE,
624 .level = 2,
625 .size = 512 * KiB,
626 .line_size = 64,
627 .lines_per_tag = 1,
628 .associativity = 16,
629 .sets = 512,
630 .partitions = 1,
631 };
632
633 /* Level 3 unified cache: */
634 static CPUCacheInfo legacy_l3_cache = {
635 .type = UNIFIED_CACHE,
636 .level = 3,
637 .size = 16 * MiB,
638 .line_size = 64,
639 .associativity = 16,
640 .sets = 16384,
641 .partitions = 1,
642 .lines_per_tag = 1,
643 .self_init = true,
644 .inclusive = true,
645 .complex_indexing = true,
646 };
647
648 /* TLB definitions: */
649
650 #define L1_DTLB_2M_ASSOC 1
651 #define L1_DTLB_2M_ENTRIES 255
652 #define L1_DTLB_4K_ASSOC 1
653 #define L1_DTLB_4K_ENTRIES 255
654
655 #define L1_ITLB_2M_ASSOC 1
656 #define L1_ITLB_2M_ENTRIES 255
657 #define L1_ITLB_4K_ASSOC 1
658 #define L1_ITLB_4K_ENTRIES 255
659
660 #define L2_DTLB_2M_ASSOC 0 /* disabled */
661 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
662 #define L2_DTLB_4K_ASSOC 4
663 #define L2_DTLB_4K_ENTRIES 512
664
665 #define L2_ITLB_2M_ASSOC 0 /* disabled */
666 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
667 #define L2_ITLB_4K_ASSOC 4
668 #define L2_ITLB_4K_ENTRIES 512
669
670 /* CPUID Leaf 0x14 constants: */
671 #define INTEL_PT_MAX_SUBLEAF 0x1
672 /*
673 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
674 * MSR can be accessed;
675 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
676 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
677 * of Intel PT MSRs across warm reset;
678 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
679 */
680 #define INTEL_PT_MINIMAL_EBX 0xf
681 /*
682 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
683 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
684 * accessed;
685 * bit[01]: ToPA tables can hold any number of output entries, up to the
686 * maximum allowed by the MaskOrTableOffset field of
687 * IA32_RTIT_OUTPUT_MASK_PTRS;
688 * bit[02]: Support Single-Range Output scheme;
689 */
690 #define INTEL_PT_MINIMAL_ECX 0x7
691 /* generated packets which contain IP payloads have LIP values */
692 #define INTEL_PT_IP_LIP (1 << 31)
693 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
694 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
695 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
696 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
697 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
698
699 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
700 uint32_t vendor2, uint32_t vendor3)
701 {
702 int i;
703 for (i = 0; i < 4; i++) {
704 dst[i] = vendor1 >> (8 * i);
705 dst[i + 4] = vendor2 >> (8 * i);
706 dst[i + 8] = vendor3 >> (8 * i);
707 }
708 dst[CPUID_VENDOR_SZ] = '\0';
709 }
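/*
 * E.g. x86_cpu_vendor_words2str(dst, 0x756e6547, 0x49656e69, 0x6c65746e)
 * (the EBX/EDX/ECX words returned by CPUID leaf 0 on Intel parts) produces
 * the string "GenuineIntel".
 */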
710
711 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
712 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
713 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
714 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
715 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
716 CPUID_PSE36 | CPUID_FXSR)
717 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
718 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
719 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
720 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
721 CPUID_PAE | CPUID_SEP | CPUID_APIC)
722
723 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
724 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
725 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
726 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
727 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
728 /* partly implemented:
729 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
730 /* missing:
731 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
732 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
733 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
734 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
735 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
736 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
737 CPUID_EXT_RDRAND)
738 /* missing:
739 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
740 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
741 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
742 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
743 CPUID_EXT_F16C */
744
745 #ifdef TARGET_X86_64
746 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
747 #else
748 #define TCG_EXT2_X86_64_FEATURES 0
749 #endif
750
751 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
752 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
753 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
754 TCG_EXT2_X86_64_FEATURES)
755 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
756 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
757 #define TCG_EXT4_FEATURES 0
758 #define TCG_SVM_FEATURES CPUID_SVM_NPT
759 #define TCG_KVM_FEATURES 0
760 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
761 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
762 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
763 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
764 CPUID_7_0_EBX_ERMS)
765 /* missing:
766 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
767 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
768 CPUID_7_0_EBX_RDSEED */
769 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
770 /* CPUID_7_0_ECX_OSPKE is dynamic */ \
771 CPUID_7_0_ECX_LA57)
772 #define TCG_7_0_EDX_FEATURES 0
773 #define TCG_APM_FEATURES 0
774 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
775 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
776 /* missing:
777 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
778
779 typedef enum FeatureWordType {
780 CPUID_FEATURE_WORD,
781 MSR_FEATURE_WORD,
782 } FeatureWordType;
783
784 typedef struct FeatureWordInfo {
785 FeatureWordType type;
786     /* feature flag names are taken from "Intel Processor Identification and
787 * the CPUID Instruction" and AMD's "CPUID Specification".
788 * In cases of disagreement between feature naming conventions,
789 * aliases may be added.
790 */
791 const char *feat_names[32];
792 union {
793 /* If type==CPUID_FEATURE_WORD */
794 struct {
795 uint32_t eax; /* Input EAX for CPUID */
796 bool needs_ecx; /* CPUID instruction uses ECX as input */
797 uint32_t ecx; /* Input ECX value for CPUID */
798 int reg; /* output register (R_* constant) */
799 } cpuid;
800 /* If type==MSR_FEATURE_WORD */
801 struct {
802 uint32_t index;
803             struct { /* CPUID feature that enumerates this MSR */
804 FeatureWord cpuid_class;
805 uint32_t cpuid_flag;
806 } cpuid_dep;
807 } msr;
808 };
809 uint32_t tcg_features; /* Feature flags supported by TCG */
810 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
811 uint32_t migratable_flags; /* Feature flags known to be migratable */
812 /* Features that shouldn't be auto-enabled by "-cpu host" */
813 uint32_t no_autoenable_flags;
814 } FeatureWordInfo;
815
816 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
817 [FEAT_1_EDX] = {
818 .type = CPUID_FEATURE_WORD,
819 .feat_names = {
820 "fpu", "vme", "de", "pse",
821 "tsc", "msr", "pae", "mce",
822 "cx8", "apic", NULL, "sep",
823 "mtrr", "pge", "mca", "cmov",
824 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
825 NULL, "ds" /* Intel dts */, "acpi", "mmx",
826 "fxsr", "sse", "sse2", "ss",
827 "ht" /* Intel htt */, "tm", "ia64", "pbe",
828 },
829 .cpuid = {.eax = 1, .reg = R_EDX, },
830 .tcg_features = TCG_FEATURES,
831 },
832 [FEAT_1_ECX] = {
833 .type = CPUID_FEATURE_WORD,
834 .feat_names = {
835 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
836 "ds-cpl", "vmx", "smx", "est",
837 "tm2", "ssse3", "cid", NULL,
838 "fma", "cx16", "xtpr", "pdcm",
839 NULL, "pcid", "dca", "sse4.1",
840 "sse4.2", "x2apic", "movbe", "popcnt",
841 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
842 "avx", "f16c", "rdrand", "hypervisor",
843 },
844 .cpuid = { .eax = 1, .reg = R_ECX, },
845 .tcg_features = TCG_EXT_FEATURES,
846 },
847     /* Feature names that are already defined in feature_name[] but
848      * are set in CPUID[8000_0001].EDX on AMD CPUs don't have their
849      * names in feat_names below. They are copied automatically
850      * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
851      */
852 [FEAT_8000_0001_EDX] = {
853 .type = CPUID_FEATURE_WORD,
854 .feat_names = {
855 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
856 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
857 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
858 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
859 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
860 "nx", NULL, "mmxext", NULL /* mmx */,
861 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
862 NULL, "lm", "3dnowext", "3dnow",
863 },
864 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
865 .tcg_features = TCG_EXT2_FEATURES,
866 },
867 [FEAT_8000_0001_ECX] = {
868 .type = CPUID_FEATURE_WORD,
869 .feat_names = {
870 "lahf-lm", "cmp-legacy", "svm", "extapic",
871 "cr8legacy", "abm", "sse4a", "misalignsse",
872 "3dnowprefetch", "osvw", "ibs", "xop",
873 "skinit", "wdt", NULL, "lwp",
874 "fma4", "tce", NULL, "nodeid-msr",
875 NULL, "tbm", "topoext", "perfctr-core",
876 "perfctr-nb", NULL, NULL, NULL,
877 NULL, NULL, NULL, NULL,
878 },
879 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
880 .tcg_features = TCG_EXT3_FEATURES,
881 /*
882 * TOPOEXT is always allowed but can't be enabled blindly by
883 * "-cpu host", as it requires consistent cache topology info
884 * to be provided so it doesn't confuse guests.
885 */
886 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
887 },
888 [FEAT_C000_0001_EDX] = {
889 .type = CPUID_FEATURE_WORD,
890 .feat_names = {
891 NULL, NULL, "xstore", "xstore-en",
892 NULL, NULL, "xcrypt", "xcrypt-en",
893 "ace2", "ace2-en", "phe", "phe-en",
894 "pmm", "pmm-en", NULL, NULL,
895 NULL, NULL, NULL, NULL,
896 NULL, NULL, NULL, NULL,
897 NULL, NULL, NULL, NULL,
898 NULL, NULL, NULL, NULL,
899 },
900 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
901 .tcg_features = TCG_EXT4_FEATURES,
902 },
903 [FEAT_KVM] = {
904 .type = CPUID_FEATURE_WORD,
905 .feat_names = {
906 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
907 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
908 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
909 NULL, NULL, NULL, NULL,
910 NULL, NULL, NULL, NULL,
911 NULL, NULL, NULL, NULL,
912 "kvmclock-stable-bit", NULL, NULL, NULL,
913 NULL, NULL, NULL, NULL,
914 },
915 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
916 .tcg_features = TCG_KVM_FEATURES,
917 },
918 [FEAT_KVM_HINTS] = {
919 .type = CPUID_FEATURE_WORD,
920 .feat_names = {
921 "kvm-hint-dedicated", NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 NULL, NULL, NULL, NULL,
924 NULL, NULL, NULL, NULL,
925 NULL, NULL, NULL, NULL,
926 NULL, NULL, NULL, NULL,
927 NULL, NULL, NULL, NULL,
928 NULL, NULL, NULL, NULL,
929 },
930 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
931 .tcg_features = TCG_KVM_FEATURES,
932 /*
933 * KVM hints aren't auto-enabled by -cpu host, they need to be
934 * explicitly enabled in the command-line.
935 */
936 .no_autoenable_flags = ~0U,
937 },
938     /*
939      * .feat_names are commented out for Hyper-V enlightenments because we
940      * don't want to have two different ways of enabling them on the QEMU
941      * command line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...)
942      * require enabling several feature bits simultaneously, and exposing these
943      * bits individually may just confuse guests.
944      */
945 [FEAT_HYPERV_EAX] = {
946 .type = CPUID_FEATURE_WORD,
947 .feat_names = {
948 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
949 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
950 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
951 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
952 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
953 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
954 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
955 NULL, NULL,
956 NULL, NULL, NULL, NULL,
957 NULL, NULL, NULL, NULL,
958 NULL, NULL, NULL, NULL,
959 NULL, NULL, NULL, NULL,
960 },
961 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
962 },
963 [FEAT_HYPERV_EBX] = {
964 .type = CPUID_FEATURE_WORD,
965 .feat_names = {
966 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
967 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
968 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
969 NULL /* hv_create_port */, NULL /* hv_connect_port */,
970 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
971 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
972 NULL, NULL,
973 NULL, NULL, NULL, NULL,
974 NULL, NULL, NULL, NULL,
975 NULL, NULL, NULL, NULL,
976 NULL, NULL, NULL, NULL,
977 },
978 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
979 },
980 [FEAT_HYPERV_EDX] = {
981 .type = CPUID_FEATURE_WORD,
982 .feat_names = {
983 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
984 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
985 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
986 NULL, NULL,
987 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
988 NULL, NULL, NULL, NULL,
989 NULL, NULL, NULL, NULL,
990 NULL, NULL, NULL, NULL,
991 NULL, NULL, NULL, NULL,
992 NULL, NULL, NULL, NULL,
993 },
994 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
995 },
996 [FEAT_HV_RECOMM_EAX] = {
997 .type = CPUID_FEATURE_WORD,
998 .feat_names = {
999 NULL /* hv_recommend_pv_as_switch */,
1000 NULL /* hv_recommend_pv_tlbflush_local */,
1001 NULL /* hv_recommend_pv_tlbflush_remote */,
1002 NULL /* hv_recommend_msr_apic_access */,
1003 NULL /* hv_recommend_msr_reset */,
1004 NULL /* hv_recommend_relaxed_timing */,
1005 NULL /* hv_recommend_dma_remapping */,
1006 NULL /* hv_recommend_int_remapping */,
1007 NULL /* hv_recommend_x2apic_msrs */,
1008 NULL /* hv_recommend_autoeoi_deprecation */,
1009 NULL /* hv_recommend_pv_ipi */,
1010 NULL /* hv_recommend_ex_hypercalls */,
1011 NULL /* hv_hypervisor_is_nested */,
1012 NULL /* hv_recommend_int_mbec */,
1013 NULL /* hv_recommend_evmcs */,
1014 NULL,
1015 NULL, NULL, NULL, NULL,
1016 NULL, NULL, NULL, NULL,
1017 NULL, NULL, NULL, NULL,
1018 NULL, NULL, NULL, NULL,
1019 },
1020 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
1021 },
1022 [FEAT_HV_NESTED_EAX] = {
1023 .type = CPUID_FEATURE_WORD,
1024 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
1025 },
1026 [FEAT_SVM] = {
1027 .type = CPUID_FEATURE_WORD,
1028 .feat_names = {
1029 "npt", "lbrv", "svm-lock", "nrip-save",
1030 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1031 NULL, NULL, "pause-filter", NULL,
1032 "pfthreshold", NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL,
1034 NULL, NULL, NULL, NULL,
1035 NULL, NULL, NULL, NULL,
1036 NULL, NULL, NULL, NULL,
1037 },
1038 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
1039 .tcg_features = TCG_SVM_FEATURES,
1040 },
1041 [FEAT_7_0_EBX] = {
1042 .type = CPUID_FEATURE_WORD,
1043 .feat_names = {
1044 "fsgsbase", "tsc-adjust", NULL, "bmi1",
1045 "hle", "avx2", NULL, "smep",
1046 "bmi2", "erms", "invpcid", "rtm",
1047 NULL, NULL, "mpx", NULL,
1048 "avx512f", "avx512dq", "rdseed", "adx",
1049 "smap", "avx512ifma", "pcommit", "clflushopt",
1050 "clwb", "intel-pt", "avx512pf", "avx512er",
1051 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1052 },
1053 .cpuid = {
1054 .eax = 7,
1055 .needs_ecx = true, .ecx = 0,
1056 .reg = R_EBX,
1057 },
1058 .tcg_features = TCG_7_0_EBX_FEATURES,
1059 },
1060 [FEAT_7_0_ECX] = {
1061 .type = CPUID_FEATURE_WORD,
1062 .feat_names = {
1063 NULL, "avx512vbmi", "umip", "pku",
1064 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
1065 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1066 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
1067 "la57", NULL, NULL, NULL,
1068 NULL, NULL, "rdpid", NULL,
1069 NULL, "cldemote", NULL, "movdiri",
1070 "movdir64b", NULL, NULL, NULL,
1071 },
1072 .cpuid = {
1073 .eax = 7,
1074 .needs_ecx = true, .ecx = 0,
1075 .reg = R_ECX,
1076 },
1077 .tcg_features = TCG_7_0_ECX_FEATURES,
1078 },
1079 [FEAT_7_0_EDX] = {
1080 .type = CPUID_FEATURE_WORD,
1081 .feat_names = {
1082 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1083 NULL, NULL, NULL, NULL,
1084 NULL, NULL, "md-clear", NULL,
1085 NULL, NULL, NULL, NULL,
1086 NULL, NULL, NULL, NULL,
1087 NULL, NULL, NULL, NULL,
1088 NULL, NULL, "spec-ctrl", "stibp",
1089 NULL, "arch-capabilities", "core-capability", "ssbd",
1090 },
1091 .cpuid = {
1092 .eax = 7,
1093 .needs_ecx = true, .ecx = 0,
1094 .reg = R_EDX,
1095 },
1096 .tcg_features = TCG_7_0_EDX_FEATURES,
1097 },
1098 [FEAT_8000_0007_EDX] = {
1099 .type = CPUID_FEATURE_WORD,
1100 .feat_names = {
1101 NULL, NULL, NULL, NULL,
1102 NULL, NULL, NULL, NULL,
1103 "invtsc", NULL, NULL, NULL,
1104 NULL, NULL, NULL, NULL,
1105 NULL, NULL, NULL, NULL,
1106 NULL, NULL, NULL, NULL,
1107 NULL, NULL, NULL, NULL,
1108 NULL, NULL, NULL, NULL,
1109 },
1110 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1111 .tcg_features = TCG_APM_FEATURES,
1112 .unmigratable_flags = CPUID_APM_INVTSC,
1113 },
1114 [FEAT_8000_0008_EBX] = {
1115 .type = CPUID_FEATURE_WORD,
1116 .feat_names = {
1117 NULL, NULL, NULL, NULL,
1118 NULL, NULL, NULL, NULL,
1119 NULL, "wbnoinvd", NULL, NULL,
1120 "ibpb", NULL, NULL, NULL,
1121 NULL, NULL, NULL, NULL,
1122 NULL, NULL, NULL, NULL,
1123 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1124 NULL, NULL, NULL, NULL,
1125 },
1126 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1127 .tcg_features = 0,
1128 .unmigratable_flags = 0,
1129 },
1130 [FEAT_XSAVE] = {
1131 .type = CPUID_FEATURE_WORD,
1132 .feat_names = {
1133 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1134 NULL, NULL, NULL, NULL,
1135 NULL, NULL, NULL, NULL,
1136 NULL, NULL, NULL, NULL,
1137 NULL, NULL, NULL, NULL,
1138 NULL, NULL, NULL, NULL,
1139 NULL, NULL, NULL, NULL,
1140 NULL, NULL, NULL, NULL,
1141 },
1142 .cpuid = {
1143 .eax = 0xd,
1144 .needs_ecx = true, .ecx = 1,
1145 .reg = R_EAX,
1146 },
1147 .tcg_features = TCG_XSAVE_FEATURES,
1148 },
1149 [FEAT_6_EAX] = {
1150 .type = CPUID_FEATURE_WORD,
1151 .feat_names = {
1152 NULL, NULL, "arat", NULL,
1153 NULL, NULL, NULL, NULL,
1154 NULL, NULL, NULL, NULL,
1155 NULL, NULL, NULL, NULL,
1156 NULL, NULL, NULL, NULL,
1157 NULL, NULL, NULL, NULL,
1158 NULL, NULL, NULL, NULL,
1159 NULL, NULL, NULL, NULL,
1160 },
1161 .cpuid = { .eax = 6, .reg = R_EAX, },
1162 .tcg_features = TCG_6_EAX_FEATURES,
1163 },
1164 [FEAT_XSAVE_COMP_LO] = {
1165 .type = CPUID_FEATURE_WORD,
1166 .cpuid = {
1167 .eax = 0xD,
1168 .needs_ecx = true, .ecx = 0,
1169 .reg = R_EAX,
1170 },
1171 .tcg_features = ~0U,
1172 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1173 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1174 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1175 XSTATE_PKRU_MASK,
1176 },
1177 [FEAT_XSAVE_COMP_HI] = {
1178 .type = CPUID_FEATURE_WORD,
1179 .cpuid = {
1180 .eax = 0xD,
1181 .needs_ecx = true, .ecx = 0,
1182 .reg = R_EDX,
1183 },
1184 .tcg_features = ~0U,
1185 },
1186     /* Below are MSR-exposed features */
1187 [FEAT_ARCH_CAPABILITIES] = {
1188 .type = MSR_FEATURE_WORD,
1189 .feat_names = {
1190 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1191 "ssb-no", "mds-no", NULL, NULL,
1192 NULL, NULL, NULL, NULL,
1193 NULL, NULL, NULL, NULL,
1194 NULL, NULL, NULL, NULL,
1195 NULL, NULL, NULL, NULL,
1196 NULL, NULL, NULL, NULL,
1197 NULL, NULL, NULL, NULL,
1198 },
1199 .msr = {
1200 .index = MSR_IA32_ARCH_CAPABILITIES,
1201 .cpuid_dep = {
1202 FEAT_7_0_EDX,
1203 CPUID_7_0_EDX_ARCH_CAPABILITIES
1204 }
1205 },
1206 },
1207 [FEAT_CORE_CAPABILITY] = {
1208 .type = MSR_FEATURE_WORD,
1209 .feat_names = {
1210 NULL, NULL, NULL, NULL,
1211 NULL, "split-lock-detect", NULL, NULL,
1212 NULL, NULL, NULL, NULL,
1213 NULL, NULL, NULL, NULL,
1214 NULL, NULL, NULL, NULL,
1215 NULL, NULL, NULL, NULL,
1216 NULL, NULL, NULL, NULL,
1217 NULL, NULL, NULL, NULL,
1218 },
1219 .msr = {
1220 .index = MSR_IA32_CORE_CAPABILITY,
1221 .cpuid_dep = {
1222 FEAT_7_0_EDX,
1223 CPUID_7_0_EDX_CORE_CAPABILITY,
1224 },
1225 },
1226 },
1227 };
1228
1229 typedef struct X86RegisterInfo32 {
1230 /* Name of register */
1231 const char *name;
1232     /* QAPI enum value for the register */
1233 X86CPURegister32 qapi_enum;
1234 } X86RegisterInfo32;
1235
1236 #define REGISTER(reg) \
1237 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1238 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
1239 REGISTER(EAX),
1240 REGISTER(ECX),
1241 REGISTER(EDX),
1242 REGISTER(EBX),
1243 REGISTER(ESP),
1244 REGISTER(EBP),
1245 REGISTER(ESI),
1246 REGISTER(EDI),
1247 };
1248 #undef REGISTER
1249
1250 typedef struct ExtSaveArea {
1251 uint32_t feature, bits;
1252 uint32_t offset, size;
1253 } ExtSaveArea;
1254
1255 static const ExtSaveArea x86_ext_save_areas[] = {
1256 [XSTATE_FP_BIT] = {
1257 /* x87 FP state component is always enabled if XSAVE is supported */
1258 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1259 /* x87 state is in the legacy region of the XSAVE area */
1260 .offset = 0,
1261 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1262 },
1263 [XSTATE_SSE_BIT] = {
1264 /* SSE state component is always enabled if XSAVE is supported */
1265 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1266 /* SSE state is in the legacy region of the XSAVE area */
1267 .offset = 0,
1268 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1269 },
1270 [XSTATE_YMM_BIT] =
1271 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1272 .offset = offsetof(X86XSaveArea, avx_state),
1273 .size = sizeof(XSaveAVX) },
1274 [XSTATE_BNDREGS_BIT] =
1275 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1276 .offset = offsetof(X86XSaveArea, bndreg_state),
1277 .size = sizeof(XSaveBNDREG) },
1278 [XSTATE_BNDCSR_BIT] =
1279 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1280 .offset = offsetof(X86XSaveArea, bndcsr_state),
1281 .size = sizeof(XSaveBNDCSR) },
1282 [XSTATE_OPMASK_BIT] =
1283 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1284 .offset = offsetof(X86XSaveArea, opmask_state),
1285 .size = sizeof(XSaveOpmask) },
1286 [XSTATE_ZMM_Hi256_BIT] =
1287 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1288 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1289 .size = sizeof(XSaveZMM_Hi256) },
1290 [XSTATE_Hi16_ZMM_BIT] =
1291 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1292 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1293 .size = sizeof(XSaveHi16_ZMM) },
1294 [XSTATE_PKRU_BIT] =
1295 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1296 .offset = offsetof(X86XSaveArea, pkru_state),
1297 .size = sizeof(XSavePKRU) },
1298 };
1299
1300 static uint32_t xsave_area_size(uint64_t mask)
1301 {
1302 int i;
1303 uint64_t ret = 0;
1304
1305 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1306 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1307 if ((mask >> i) & 1) {
1308 ret = MAX(ret, esa->offset + esa->size);
1309 }
1310 }
1311 return ret;
1312 }
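/*
 * E.g. mask = XSTATE_FP_MASK | XSTATE_SSE_MASK covers only the legacy
 * region, so this returns sizeof(X86LegacyXSaveArea) +
 * sizeof(X86XSaveHeader), i.e. 512 + 64 = 576 bytes.
 */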
1313
1314 static inline bool accel_uses_host_cpuid(void)
1315 {
1316 return kvm_enabled() || hvf_enabled();
1317 }
1318
1319 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1320 {
1321 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1322 cpu->env.features[FEAT_XSAVE_COMP_LO];
1323 }
1324
1325 const char *get_register_name_32(unsigned int reg)
1326 {
1327 if (reg >= CPU_NB_REGS32) {
1328 return NULL;
1329 }
1330 return x86_reg_info_32[reg].name;
1331 }
1332
1333 /*
1334 * Returns the set of feature flags that are supported and migratable by
1335 * QEMU, for a given FeatureWord.
1336 */
1337 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1338 {
1339 FeatureWordInfo *wi = &feature_word_info[w];
1340 uint32_t r = 0;
1341 int i;
1342
1343 for (i = 0; i < 32; i++) {
1344 uint32_t f = 1U << i;
1345
1346 /* If the feature name is known, it is implicitly considered migratable,
1347 * unless it is explicitly set in unmigratable_flags */
1348 if ((wi->migratable_flags & f) ||
1349 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1350 r |= f;
1351 }
1352 }
1353 return r;
1354 }
1355
1356 void host_cpuid(uint32_t function, uint32_t count,
1357 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1358 {
1359 uint32_t vec[4];
1360
1361 #ifdef __x86_64__
1362 asm volatile("cpuid"
1363 : "=a"(vec[0]), "=b"(vec[1]),
1364 "=c"(vec[2]), "=d"(vec[3])
1365 : "0"(function), "c"(count) : "cc");
1366 #elif defined(__i386__)
1367 asm volatile("pusha \n\t"
1368 "cpuid \n\t"
1369 "mov %%eax, 0(%2) \n\t"
1370 "mov %%ebx, 4(%2) \n\t"
1371 "mov %%ecx, 8(%2) \n\t"
1372 "mov %%edx, 12(%2) \n\t"
1373 "popa"
1374 : : "a"(function), "c"(count), "S"(vec)
1375 : "memory", "cc");
1376 #else
1377 abort();
1378 #endif
1379
1380 if (eax)
1381 *eax = vec[0];
1382 if (ebx)
1383 *ebx = vec[1];
1384 if (ecx)
1385 *ecx = vec[2];
1386 if (edx)
1387 *edx = vec[3];
1388 }
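/*
 * E.g. host_cpuid(0, 0, &eax, &ebx, &ecx, &edx) returns the highest basic
 * leaf in EAX and the vendor string words in EBX/EDX/ECX, which is how
 * host_vendor_fms() below obtains the host vendor.
 */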
1389
1390 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1391 {
1392 uint32_t eax, ebx, ecx, edx;
1393
1394 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1395 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1396
1397 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1398 if (family) {
1399 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1400 }
1401 if (model) {
1402 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1403 }
1404 if (stepping) {
1405 *stepping = eax & 0x0F;
1406 }
1407 }
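/*
 * E.g. EAX = 0x000506E3 (a Skylake client CPUID signature) decodes to
 * family = 0x6 + 0x0 = 6, model = 0xE | 0x50 = 0x5E (94), stepping = 3.
 */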
1408
1409 /* CPU class name definitions: */
1410
1411 /* Return type name for a given CPU model name
1412 * Caller is responsible for freeing the returned string.
1413 */
1414 static char *x86_cpu_type_name(const char *model_name)
1415 {
1416 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
1417 }
1418
1419 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1420 {
1421 ObjectClass *oc;
1422 char *typename = x86_cpu_type_name(cpu_model);
1423 oc = object_class_by_name(typename);
1424 g_free(typename);
1425 return oc;
1426 }
1427
1428 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1429 {
1430 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1431 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1432 return g_strndup(class_name,
1433 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1434 }
1435
1436 struct X86CPUDefinition {
1437 const char *name;
1438 uint32_t level;
1439 uint32_t xlevel;
1440     /* vendor is a zero-terminated, 12-character ASCII string */
1441 char vendor[CPUID_VENDOR_SZ + 1];
1442 int family;
1443 int model;
1444 int stepping;
1445 FeatureWordArray features;
1446 const char *model_id;
1447 CPUCaches *cache_info;
1448 };
1449
1450 static CPUCaches epyc_cache_info = {
1451 .l1d_cache = &(CPUCacheInfo) {
1452 .type = DATA_CACHE,
1453 .level = 1,
1454 .size = 32 * KiB,
1455 .line_size = 64,
1456 .associativity = 8,
1457 .partitions = 1,
1458 .sets = 64,
1459 .lines_per_tag = 1,
1460 .self_init = 1,
1461 .no_invd_sharing = true,
1462 },
1463 .l1i_cache = &(CPUCacheInfo) {
1464 .type = INSTRUCTION_CACHE,
1465 .level = 1,
1466 .size = 64 * KiB,
1467 .line_size = 64,
1468 .associativity = 4,
1469 .partitions = 1,
1470 .sets = 256,
1471 .lines_per_tag = 1,
1472 .self_init = 1,
1473 .no_invd_sharing = true,
1474 },
1475 .l2_cache = &(CPUCacheInfo) {
1476 .type = UNIFIED_CACHE,
1477 .level = 2,
1478 .size = 512 * KiB,
1479 .line_size = 64,
1480 .associativity = 8,
1481 .partitions = 1,
1482 .sets = 1024,
1483 .lines_per_tag = 1,
1484 },
1485 .l3_cache = &(CPUCacheInfo) {
1486 .type = UNIFIED_CACHE,
1487 .level = 3,
1488 .size = 8 * MiB,
1489 .line_size = 64,
1490 .associativity = 16,
1491 .partitions = 1,
1492 .sets = 8192,
1493 .lines_per_tag = 1,
1494 .self_init = true,
1495 .inclusive = true,
1496 .complex_indexing = true,
1497 },
1498 };
1499
1500 static X86CPUDefinition builtin_x86_defs[] = {
1501 {
1502 .name = "qemu64",
1503 .level = 0xd,
1504 .vendor = CPUID_VENDOR_AMD,
1505 .family = 6,
1506 .model = 6,
1507 .stepping = 3,
1508 .features[FEAT_1_EDX] =
1509 PPRO_FEATURES |
1510 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1511 CPUID_PSE36,
1512 .features[FEAT_1_ECX] =
1513 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1514 .features[FEAT_8000_0001_EDX] =
1515 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1516 .features[FEAT_8000_0001_ECX] =
1517 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1518 .xlevel = 0x8000000A,
1519 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1520 },
1521 {
1522 .name = "phenom",
1523 .level = 5,
1524 .vendor = CPUID_VENDOR_AMD,
1525 .family = 16,
1526 .model = 2,
1527 .stepping = 3,
1528 /* Missing: CPUID_HT */
1529 .features[FEAT_1_EDX] =
1530 PPRO_FEATURES |
1531 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1532 CPUID_PSE36 | CPUID_VME,
1533 .features[FEAT_1_ECX] =
1534 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1535 CPUID_EXT_POPCNT,
1536 .features[FEAT_8000_0001_EDX] =
1537 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1538 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1539 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1540 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1541 CPUID_EXT3_CR8LEG,
1542 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1543 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1544 .features[FEAT_8000_0001_ECX] =
1545 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1546 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1547 /* Missing: CPUID_SVM_LBRV */
1548 .features[FEAT_SVM] =
1549 CPUID_SVM_NPT,
1550 .xlevel = 0x8000001A,
1551 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1552 },
1553 {
1554 .name = "core2duo",
1555 .level = 10,
1556 .vendor = CPUID_VENDOR_INTEL,
1557 .family = 6,
1558 .model = 15,
1559 .stepping = 11,
1560 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1561 .features[FEAT_1_EDX] =
1562 PPRO_FEATURES |
1563 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1564 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1565 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1566 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1567 .features[FEAT_1_ECX] =
1568 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1569 CPUID_EXT_CX16,
1570 .features[FEAT_8000_0001_EDX] =
1571 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1572 .features[FEAT_8000_0001_ECX] =
1573 CPUID_EXT3_LAHF_LM,
1574 .xlevel = 0x80000008,
1575 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1576 },
1577 {
1578 .name = "kvm64",
1579 .level = 0xd,
1580 .vendor = CPUID_VENDOR_INTEL,
1581 .family = 15,
1582 .model = 6,
1583 .stepping = 1,
1584 /* Missing: CPUID_HT */
1585 .features[FEAT_1_EDX] =
1586 PPRO_FEATURES | CPUID_VME |
1587 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1588 CPUID_PSE36,
1589 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1590 .features[FEAT_1_ECX] =
1591 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1592 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1593 .features[FEAT_8000_0001_EDX] =
1594 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1595 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1596 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1597 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1598 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1599 .features[FEAT_8000_0001_ECX] =
1600 0,
1601 .xlevel = 0x80000008,
1602 .model_id = "Common KVM processor"
1603 },
1604 {
1605 .name = "qemu32",
1606 .level = 4,
1607 .vendor = CPUID_VENDOR_INTEL,
1608 .family = 6,
1609 .model = 6,
1610 .stepping = 3,
1611 .features[FEAT_1_EDX] =
1612 PPRO_FEATURES,
1613 .features[FEAT_1_ECX] =
1614 CPUID_EXT_SSE3,
1615 .xlevel = 0x80000004,
1616 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1617 },
1618 {
1619 .name = "kvm32",
1620 .level = 5,
1621 .vendor = CPUID_VENDOR_INTEL,
1622 .family = 15,
1623 .model = 6,
1624 .stepping = 1,
1625 .features[FEAT_1_EDX] =
1626 PPRO_FEATURES | CPUID_VME |
1627 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1628 .features[FEAT_1_ECX] =
1629 CPUID_EXT_SSE3,
1630 .features[FEAT_8000_0001_ECX] =
1631 0,
1632 .xlevel = 0x80000008,
1633 .model_id = "Common 32-bit KVM processor"
1634 },
1635 {
1636 .name = "coreduo",
1637 .level = 10,
1638 .vendor = CPUID_VENDOR_INTEL,
1639 .family = 6,
1640 .model = 14,
1641 .stepping = 8,
1642 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1643 .features[FEAT_1_EDX] =
1644 PPRO_FEATURES | CPUID_VME |
1645 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1646 CPUID_SS,
1647 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1648 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1649 .features[FEAT_1_ECX] =
1650 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1651 .features[FEAT_8000_0001_EDX] =
1652 CPUID_EXT2_NX,
1653 .xlevel = 0x80000008,
1654 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1655 },
1656 {
1657 .name = "486",
1658 .level = 1,
1659 .vendor = CPUID_VENDOR_INTEL,
1660 .family = 4,
1661 .model = 8,
1662 .stepping = 0,
1663 .features[FEAT_1_EDX] =
1664 I486_FEATURES,
1665 .xlevel = 0,
1666 .model_id = "",
1667 },
1668 {
1669 .name = "pentium",
1670 .level = 1,
1671 .vendor = CPUID_VENDOR_INTEL,
1672 .family = 5,
1673 .model = 4,
1674 .stepping = 3,
1675 .features[FEAT_1_EDX] =
1676 PENTIUM_FEATURES,
1677 .xlevel = 0,
1678 .model_id = "",
1679 },
1680 {
1681 .name = "pentium2",
1682 .level = 2,
1683 .vendor = CPUID_VENDOR_INTEL,
1684 .family = 6,
1685 .model = 5,
1686 .stepping = 2,
1687 .features[FEAT_1_EDX] =
1688 PENTIUM2_FEATURES,
1689 .xlevel = 0,
1690 .model_id = "",
1691 },
1692 {
1693 .name = "pentium3",
1694 .level = 3,
1695 .vendor = CPUID_VENDOR_INTEL,
1696 .family = 6,
1697 .model = 7,
1698 .stepping = 3,
1699 .features[FEAT_1_EDX] =
1700 PENTIUM3_FEATURES,
1701 .xlevel = 0,
1702 .model_id = "",
1703 },
1704 {
1705 .name = "athlon",
1706 .level = 2,
1707 .vendor = CPUID_VENDOR_AMD,
1708 .family = 6,
1709 .model = 2,
1710 .stepping = 3,
1711 .features[FEAT_1_EDX] =
1712 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1713 CPUID_MCA,
1714 .features[FEAT_8000_0001_EDX] =
1715 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1716 .xlevel = 0x80000008,
1717 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1718 },
1719 {
1720 .name = "n270",
1721 .level = 10,
1722 .vendor = CPUID_VENDOR_INTEL,
1723 .family = 6,
1724 .model = 28,
1725 .stepping = 2,
1726 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1727 .features[FEAT_1_EDX] =
1728 PPRO_FEATURES |
1729 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1730 CPUID_ACPI | CPUID_SS,
1731 /* Some CPUs have no CPUID_SEP */
1732 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1733 * CPUID_EXT_XTPR */
1734 .features[FEAT_1_ECX] =
1735 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1736 CPUID_EXT_MOVBE,
1737 .features[FEAT_8000_0001_EDX] =
1738 CPUID_EXT2_NX,
1739 .features[FEAT_8000_0001_ECX] =
1740 CPUID_EXT3_LAHF_LM,
1741 .xlevel = 0x80000008,
1742 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1743 },
1744 {
1745 .name = "Conroe",
1746 .level = 10,
1747 .vendor = CPUID_VENDOR_INTEL,
1748 .family = 6,
1749 .model = 15,
1750 .stepping = 3,
1751 .features[FEAT_1_EDX] =
1752 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1753 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1754 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1755 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1756 CPUID_DE | CPUID_FP87,
1757 .features[FEAT_1_ECX] =
1758 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1759 .features[FEAT_8000_0001_EDX] =
1760 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1761 .features[FEAT_8000_0001_ECX] =
1762 CPUID_EXT3_LAHF_LM,
1763 .xlevel = 0x80000008,
1764 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1765 },
1766 {
1767 .name = "Penryn",
1768 .level = 10,
1769 .vendor = CPUID_VENDOR_INTEL,
1770 .family = 6,
1771 .model = 23,
1772 .stepping = 3,
1773 .features[FEAT_1_EDX] =
1774 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1775 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1776 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1777 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1778 CPUID_DE | CPUID_FP87,
1779 .features[FEAT_1_ECX] =
1780 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1781 CPUID_EXT_SSE3,
1782 .features[FEAT_8000_0001_EDX] =
1783 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1784 .features[FEAT_8000_0001_ECX] =
1785 CPUID_EXT3_LAHF_LM,
1786 .xlevel = 0x80000008,
1787 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1788 },
1789 {
1790 .name = "Nehalem",
1791 .level = 11,
1792 .vendor = CPUID_VENDOR_INTEL,
1793 .family = 6,
1794 .model = 26,
1795 .stepping = 3,
1796 .features[FEAT_1_EDX] =
1797 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1798 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1799 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1800 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1801 CPUID_DE | CPUID_FP87,
1802 .features[FEAT_1_ECX] =
1803 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1804 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1805 .features[FEAT_8000_0001_EDX] =
1806 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1807 .features[FEAT_8000_0001_ECX] =
1808 CPUID_EXT3_LAHF_LM,
1809 .xlevel = 0x80000008,
1810 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1811 },
1812 {
1813 .name = "Nehalem-IBRS",
1814 .level = 11,
1815 .vendor = CPUID_VENDOR_INTEL,
1816 .family = 6,
1817 .model = 26,
1818 .stepping = 3,
1819 .features[FEAT_1_EDX] =
1820 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1821 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1822 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1823 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1824 CPUID_DE | CPUID_FP87,
1825 .features[FEAT_1_ECX] =
1826 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1827 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1828 .features[FEAT_7_0_EDX] =
1829 CPUID_7_0_EDX_SPEC_CTRL,
1830 .features[FEAT_8000_0001_EDX] =
1831 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1832 .features[FEAT_8000_0001_ECX] =
1833 CPUID_EXT3_LAHF_LM,
1834 .xlevel = 0x80000008,
1835 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1836 },
1837 {
1838 .name = "Westmere",
1839 .level = 11,
1840 .vendor = CPUID_VENDOR_INTEL,
1841 .family = 6,
1842 .model = 44,
1843 .stepping = 1,
1844 .features[FEAT_1_EDX] =
1845 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1846 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1847 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1848 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1849 CPUID_DE | CPUID_FP87,
1850 .features[FEAT_1_ECX] =
1851 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1852 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1853 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1854 .features[FEAT_8000_0001_EDX] =
1855 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1856 .features[FEAT_8000_0001_ECX] =
1857 CPUID_EXT3_LAHF_LM,
1858 .features[FEAT_6_EAX] =
1859 CPUID_6_EAX_ARAT,
1860 .xlevel = 0x80000008,
1861 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1862 },
1863 {
1864 .name = "Westmere-IBRS",
1865 .level = 11,
1866 .vendor = CPUID_VENDOR_INTEL,
1867 .family = 6,
1868 .model = 44,
1869 .stepping = 1,
1870 .features[FEAT_1_EDX] =
1871 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1872 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1873 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1874 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1875 CPUID_DE | CPUID_FP87,
1876 .features[FEAT_1_ECX] =
1877 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1878 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1879 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1880 .features[FEAT_8000_0001_EDX] =
1881 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1882 .features[FEAT_8000_0001_ECX] =
1883 CPUID_EXT3_LAHF_LM,
1884 .features[FEAT_7_0_EDX] =
1885 CPUID_7_0_EDX_SPEC_CTRL,
1886 .features[FEAT_6_EAX] =
1887 CPUID_6_EAX_ARAT,
1888 .xlevel = 0x80000008,
1889 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1890 },
1891 {
1892 .name = "SandyBridge",
1893 .level = 0xd,
1894 .vendor = CPUID_VENDOR_INTEL,
1895 .family = 6,
1896 .model = 42,
1897 .stepping = 1,
1898 .features[FEAT_1_EDX] =
1899 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1900 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1901 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1902 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1903 CPUID_DE | CPUID_FP87,
1904 .features[FEAT_1_ECX] =
1905 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1906 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1907 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1908 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1909 CPUID_EXT_SSE3,
1910 .features[FEAT_8000_0001_EDX] =
1911 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1912 CPUID_EXT2_SYSCALL,
1913 .features[FEAT_8000_0001_ECX] =
1914 CPUID_EXT3_LAHF_LM,
1915 .features[FEAT_XSAVE] =
1916 CPUID_XSAVE_XSAVEOPT,
1917 .features[FEAT_6_EAX] =
1918 CPUID_6_EAX_ARAT,
1919 .xlevel = 0x80000008,
1920 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1921 },
1922 {
1923 .name = "SandyBridge-IBRS",
1924 .level = 0xd,
1925 .vendor = CPUID_VENDOR_INTEL,
1926 .family = 6,
1927 .model = 42,
1928 .stepping = 1,
1929 .features[FEAT_1_EDX] =
1930 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1931 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1932 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1933 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1934 CPUID_DE | CPUID_FP87,
1935 .features[FEAT_1_ECX] =
1936 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1937 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1938 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1939 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1940 CPUID_EXT_SSE3,
1941 .features[FEAT_8000_0001_EDX] =
1942 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1943 CPUID_EXT2_SYSCALL,
1944 .features[FEAT_8000_0001_ECX] =
1945 CPUID_EXT3_LAHF_LM,
1946 .features[FEAT_7_0_EDX] =
1947 CPUID_7_0_EDX_SPEC_CTRL,
1948 .features[FEAT_XSAVE] =
1949 CPUID_XSAVE_XSAVEOPT,
1950 .features[FEAT_6_EAX] =
1951 CPUID_6_EAX_ARAT,
1952 .xlevel = 0x80000008,
1953 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1954 },
1955 {
1956 .name = "IvyBridge",
1957 .level = 0xd,
1958 .vendor = CPUID_VENDOR_INTEL,
1959 .family = 6,
1960 .model = 58,
1961 .stepping = 9,
1962 .features[FEAT_1_EDX] =
1963 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1964 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1965 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1966 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1967 CPUID_DE | CPUID_FP87,
1968 .features[FEAT_1_ECX] =
1969 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1970 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1971 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1972 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1973 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1974 .features[FEAT_7_0_EBX] =
1975 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1976 CPUID_7_0_EBX_ERMS,
1977 .features[FEAT_8000_0001_EDX] =
1978 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1979 CPUID_EXT2_SYSCALL,
1980 .features[FEAT_8000_0001_ECX] =
1981 CPUID_EXT3_LAHF_LM,
1982 .features[FEAT_XSAVE] =
1983 CPUID_XSAVE_XSAVEOPT,
1984 .features[FEAT_6_EAX] =
1985 CPUID_6_EAX_ARAT,
1986 .xlevel = 0x80000008,
1987 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1988 },
1989 {
1990 .name = "IvyBridge-IBRS",
1991 .level = 0xd,
1992 .vendor = CPUID_VENDOR_INTEL,
1993 .family = 6,
1994 .model = 58,
1995 .stepping = 9,
1996 .features[FEAT_1_EDX] =
1997 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1998 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1999 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2000 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2001 CPUID_DE | CPUID_FP87,
2002 .features[FEAT_1_ECX] =
2003 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2004 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2005 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2006 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2007 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2008 .features[FEAT_7_0_EBX] =
2009 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
2010 CPUID_7_0_EBX_ERMS,
2011 .features[FEAT_8000_0001_EDX] =
2012 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2013 CPUID_EXT2_SYSCALL,
2014 .features[FEAT_8000_0001_ECX] =
2015 CPUID_EXT3_LAHF_LM,
2016 .features[FEAT_7_0_EDX] =
2017 CPUID_7_0_EDX_SPEC_CTRL,
2018 .features[FEAT_XSAVE] =
2019 CPUID_XSAVE_XSAVEOPT,
2020 .features[FEAT_6_EAX] =
2021 CPUID_6_EAX_ARAT,
2022 .xlevel = 0x80000008,
2023 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
2024 },
2025 {
2026 .name = "Haswell-noTSX",
2027 .level = 0xd,
2028 .vendor = CPUID_VENDOR_INTEL,
2029 .family = 6,
2030 .model = 60,
2031 .stepping = 1,
2032 .features[FEAT_1_EDX] =
2033 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2034 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2035 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2036 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2037 CPUID_DE | CPUID_FP87,
2038 .features[FEAT_1_ECX] =
2039 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2040 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2041 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2042 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2043 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2044 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2045 .features[FEAT_8000_0001_EDX] =
2046 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2047 CPUID_EXT2_SYSCALL,
2048 .features[FEAT_8000_0001_ECX] =
2049 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2050 .features[FEAT_7_0_EBX] =
2051 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2052 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2053 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
2054 .features[FEAT_XSAVE] =
2055 CPUID_XSAVE_XSAVEOPT,
2056 .features[FEAT_6_EAX] =
2057 CPUID_6_EAX_ARAT,
2058 .xlevel = 0x80000008,
2059 .model_id = "Intel Core Processor (Haswell, no TSX)",
2060 },
2061 {
2062 .name = "Haswell-noTSX-IBRS",
2063 .level = 0xd,
2064 .vendor = CPUID_VENDOR_INTEL,
2065 .family = 6,
2066 .model = 60,
2067 .stepping = 1,
2068 .features[FEAT_1_EDX] =
2069 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2070 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2071 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2072 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2073 CPUID_DE | CPUID_FP87,
2074 .features[FEAT_1_ECX] =
2075 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2076 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2077 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2078 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2079 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2080 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2081 .features[FEAT_8000_0001_EDX] =
2082 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2083 CPUID_EXT2_SYSCALL,
2084 .features[FEAT_8000_0001_ECX] =
2085 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2086 .features[FEAT_7_0_EDX] =
2087 CPUID_7_0_EDX_SPEC_CTRL,
2088 .features[FEAT_7_0_EBX] =
2089 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2090 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2091 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
2092 .features[FEAT_XSAVE] =
2093 CPUID_XSAVE_XSAVEOPT,
2094 .features[FEAT_6_EAX] =
2095 CPUID_6_EAX_ARAT,
2096 .xlevel = 0x80000008,
2097 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
2098 },
2099 {
2100 .name = "Haswell",
2101 .level = 0xd,
2102 .vendor = CPUID_VENDOR_INTEL,
2103 .family = 6,
2104 .model = 60,
2105 .stepping = 4,
2106 .features[FEAT_1_EDX] =
2107 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2108 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2109 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2110 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2111 CPUID_DE | CPUID_FP87,
2112 .features[FEAT_1_ECX] =
2113 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2114 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2115 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2116 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2117 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2118 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2119 .features[FEAT_8000_0001_EDX] =
2120 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2121 CPUID_EXT2_SYSCALL,
2122 .features[FEAT_8000_0001_ECX] =
2123 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2124 .features[FEAT_7_0_EBX] =
2125 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2126 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2127 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2128 CPUID_7_0_EBX_RTM,
2129 .features[FEAT_XSAVE] =
2130 CPUID_XSAVE_XSAVEOPT,
2131 .features[FEAT_6_EAX] =
2132 CPUID_6_EAX_ARAT,
2133 .xlevel = 0x80000008,
2134 .model_id = "Intel Core Processor (Haswell)",
2135 },
2136 {
2137 .name = "Haswell-IBRS",
2138 .level = 0xd,
2139 .vendor = CPUID_VENDOR_INTEL,
2140 .family = 6,
2141 .model = 60,
2142 .stepping = 4,
2143 .features[FEAT_1_EDX] =
2144 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2145 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2146 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2147 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2148 CPUID_DE | CPUID_FP87,
2149 .features[FEAT_1_ECX] =
2150 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2151 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2152 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2153 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2154 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2155 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2156 .features[FEAT_8000_0001_EDX] =
2157 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2158 CPUID_EXT2_SYSCALL,
2159 .features[FEAT_8000_0001_ECX] =
2160 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2161 .features[FEAT_7_0_EDX] =
2162 CPUID_7_0_EDX_SPEC_CTRL,
2163 .features[FEAT_7_0_EBX] =
2164 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2165 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2166 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2167 CPUID_7_0_EBX_RTM,
2168 .features[FEAT_XSAVE] =
2169 CPUID_XSAVE_XSAVEOPT,
2170 .features[FEAT_6_EAX] =
2171 CPUID_6_EAX_ARAT,
2172 .xlevel = 0x80000008,
2173 .model_id = "Intel Core Processor (Haswell, IBRS)",
2174 },
2175 {
2176 .name = "Broadwell-noTSX",
2177 .level = 0xd,
2178 .vendor = CPUID_VENDOR_INTEL,
2179 .family = 6,
2180 .model = 61,
2181 .stepping = 2,
2182 .features[FEAT_1_EDX] =
2183 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2184 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2185 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2186 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2187 CPUID_DE | CPUID_FP87,
2188 .features[FEAT_1_ECX] =
2189 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2190 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2191 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2192 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2193 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2194 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2195 .features[FEAT_8000_0001_EDX] =
2196 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2197 CPUID_EXT2_SYSCALL,
2198 .features[FEAT_8000_0001_ECX] =
2199 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2200 .features[FEAT_7_0_EBX] =
2201 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2202 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2203 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2204 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2205 CPUID_7_0_EBX_SMAP,
2206 .features[FEAT_XSAVE] =
2207 CPUID_XSAVE_XSAVEOPT,
2208 .features[FEAT_6_EAX] =
2209 CPUID_6_EAX_ARAT,
2210 .xlevel = 0x80000008,
2211 .model_id = "Intel Core Processor (Broadwell, no TSX)",
2212 },
2213 {
2214 .name = "Broadwell-noTSX-IBRS",
2215 .level = 0xd,
2216 .vendor = CPUID_VENDOR_INTEL,
2217 .family = 6,
2218 .model = 61,
2219 .stepping = 2,
2220 .features[FEAT_1_EDX] =
2221 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2222 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2223 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2224 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2225 CPUID_DE | CPUID_FP87,
2226 .features[FEAT_1_ECX] =
2227 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2228 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2229 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2230 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2231 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2232 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2233 .features[FEAT_8000_0001_EDX] =
2234 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2235 CPUID_EXT2_SYSCALL,
2236 .features[FEAT_8000_0001_ECX] =
2237 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2238 .features[FEAT_7_0_EDX] =
2239 CPUID_7_0_EDX_SPEC_CTRL,
2240 .features[FEAT_7_0_EBX] =
2241 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2242 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2243 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2244 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2245 CPUID_7_0_EBX_SMAP,
2246 .features[FEAT_XSAVE] =
2247 CPUID_XSAVE_XSAVEOPT,
2248 .features[FEAT_6_EAX] =
2249 CPUID_6_EAX_ARAT,
2250 .xlevel = 0x80000008,
2251 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
2252 },
2253 {
2254 .name = "Broadwell",
2255 .level = 0xd,
2256 .vendor = CPUID_VENDOR_INTEL,
2257 .family = 6,
2258 .model = 61,
2259 .stepping = 2,
2260 .features[FEAT_1_EDX] =
2261 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2262 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2263 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2264 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2265 CPUID_DE | CPUID_FP87,
2266 .features[FEAT_1_ECX] =
2267 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2268 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2269 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2270 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2271 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2272 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2273 .features[FEAT_8000_0001_EDX] =
2274 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2275 CPUID_EXT2_SYSCALL,
2276 .features[FEAT_8000_0001_ECX] =
2277 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2278 .features[FEAT_7_0_EBX] =
2279 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2280 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2281 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2282 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2283 CPUID_7_0_EBX_SMAP,
2284 .features[FEAT_XSAVE] =
2285 CPUID_XSAVE_XSAVEOPT,
2286 .features[FEAT_6_EAX] =
2287 CPUID_6_EAX_ARAT,
2288 .xlevel = 0x80000008,
2289 .model_id = "Intel Core Processor (Broadwell)",
2290 },
2291 {
2292 .name = "Broadwell-IBRS",
2293 .level = 0xd,
2294 .vendor = CPUID_VENDOR_INTEL,
2295 .family = 6,
2296 .model = 61,
2297 .stepping = 2,
2298 .features[FEAT_1_EDX] =
2299 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2300 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2301 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2302 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2303 CPUID_DE | CPUID_FP87,
2304 .features[FEAT_1_ECX] =
2305 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2306 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2307 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2308 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2309 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2310 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2311 .features[FEAT_8000_0001_EDX] =
2312 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2313 CPUID_EXT2_SYSCALL,
2314 .features[FEAT_8000_0001_ECX] =
2315 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2316 .features[FEAT_7_0_EDX] =
2317 CPUID_7_0_EDX_SPEC_CTRL,
2318 .features[FEAT_7_0_EBX] =
2319 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2320 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2321 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2322 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2323 CPUID_7_0_EBX_SMAP,
2324 .features[FEAT_XSAVE] =
2325 CPUID_XSAVE_XSAVEOPT,
2326 .features[FEAT_6_EAX] =
2327 CPUID_6_EAX_ARAT,
2328 .xlevel = 0x80000008,
2329 .model_id = "Intel Core Processor (Broadwell, IBRS)",
2330 },
2331 {
2332 .name = "Skylake-Client",
2333 .level = 0xd,
2334 .vendor = CPUID_VENDOR_INTEL,
2335 .family = 6,
2336 .model = 94,
2337 .stepping = 3,
2338 .features[FEAT_1_EDX] =
2339 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2340 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2341 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2342 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2343 CPUID_DE | CPUID_FP87,
2344 .features[FEAT_1_ECX] =
2345 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2346 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2347 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2348 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2349 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2350 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2351 .features[FEAT_8000_0001_EDX] =
2352 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2353 CPUID_EXT2_SYSCALL,
2354 .features[FEAT_8000_0001_ECX] =
2355 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2356 .features[FEAT_7_0_EBX] =
2357 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2358 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2359 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2360 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2361 CPUID_7_0_EBX_SMAP,
2362 /* Missing: XSAVES (not supported by some Linux versions,
2363 * including v4.1 to v4.12).
2364 * KVM doesn't yet expose any XSAVES state save component,
2365 * and the only one defined in Skylake (processor tracing)
2366 * probably will block migration anyway.
2367 */
2368 .features[FEAT_XSAVE] =
2369 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2370 CPUID_XSAVE_XGETBV1,
2371 .features[FEAT_6_EAX] =
2372 CPUID_6_EAX_ARAT,
2373 .xlevel = 0x80000008,
2374 .model_id = "Intel Core Processor (Skylake)",
2375 },
2376 {
2377 .name = "Skylake-Client-IBRS",
2378 .level = 0xd,
2379 .vendor = CPUID_VENDOR_INTEL,
2380 .family = 6,
2381 .model = 94,
2382 .stepping = 3,
2383 .features[FEAT_1_EDX] =
2384 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2385 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2386 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2387 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2388 CPUID_DE | CPUID_FP87,
2389 .features[FEAT_1_ECX] =
2390 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2391 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2392 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2393 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2394 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2395 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2396 .features[FEAT_8000_0001_EDX] =
2397 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2398 CPUID_EXT2_SYSCALL,
2399 .features[FEAT_8000_0001_ECX] =
2400 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2401 .features[FEAT_7_0_EDX] =
2402 CPUID_7_0_EDX_SPEC_CTRL,
2403 .features[FEAT_7_0_EBX] =
2404 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2405 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2406 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2407 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2408 CPUID_7_0_EBX_SMAP,
2409 /* Missing: XSAVES (not supported by some Linux versions,
2410 * including v4.1 to v4.12).
2411 * KVM doesn't yet expose any XSAVES state save component,
2412 * and the only one defined in Skylake (processor tracing)
2413 * probably will block migration anyway.
2414 */
2415 .features[FEAT_XSAVE] =
2416 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2417 CPUID_XSAVE_XGETBV1,
2418 .features[FEAT_6_EAX] =
2419 CPUID_6_EAX_ARAT,
2420 .xlevel = 0x80000008,
2421 .model_id = "Intel Core Processor (Skylake, IBRS)",
2422 },
2423 {
2424 .name = "Skylake-Server",
2425 .level = 0xd,
2426 .vendor = CPUID_VENDOR_INTEL,
2427 .family = 6,
2428 .model = 85,
2429 .stepping = 4,
2430 .features[FEAT_1_EDX] =
2431 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2432 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2433 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2434 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2435 CPUID_DE | CPUID_FP87,
2436 .features[FEAT_1_ECX] =
2437 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2438 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2439 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2440 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2441 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2442 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2443 .features[FEAT_8000_0001_EDX] =
2444 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2445 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2446 .features[FEAT_8000_0001_ECX] =
2447 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2448 .features[FEAT_7_0_EBX] =
2449 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2450 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2451 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2452 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2453 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2454 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2455 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2456 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2457 .features[FEAT_7_0_ECX] =
2458 CPUID_7_0_ECX_PKU,
2459 /* Missing: XSAVES (not supported by some Linux versions,
2460 * including v4.1 to v4.12).
2461 * KVM doesn't yet expose any XSAVES state save component,
2462 * and the only one defined in Skylake (processor tracing)
2463 * probably will block migration anyway.
2464 */
2465 .features[FEAT_XSAVE] =
2466 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2467 CPUID_XSAVE_XGETBV1,
2468 .features[FEAT_6_EAX] =
2469 CPUID_6_EAX_ARAT,
2470 .xlevel = 0x80000008,
2471 .model_id = "Intel Xeon Processor (Skylake)",
2472 },
2473 {
2474 .name = "Skylake-Server-IBRS",
2475 .level = 0xd,
2476 .vendor = CPUID_VENDOR_INTEL,
2477 .family = 6,
2478 .model = 85,
2479 .stepping = 4,
2480 .features[FEAT_1_EDX] =
2481 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2482 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2483 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2484 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2485 CPUID_DE | CPUID_FP87,
2486 .features[FEAT_1_ECX] =
2487 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2488 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2489 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2490 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2491 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2492 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2493 .features[FEAT_8000_0001_EDX] =
2494 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2495 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2496 .features[FEAT_8000_0001_ECX] =
2497 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2498 .features[FEAT_7_0_EDX] =
2499 CPUID_7_0_EDX_SPEC_CTRL,
2500 .features[FEAT_7_0_EBX] =
2501 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2502 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2503 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2504 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2505 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2506 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2507 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2508 CPUID_7_0_EBX_AVX512VL,
2509 .features[FEAT_7_0_ECX] =
2510 CPUID_7_0_ECX_PKU,
2511 /* Missing: XSAVES (not supported by some Linux versions,
2512 * including v4.1 to v4.12).
2513 * KVM doesn't yet expose any XSAVES state save component,
2514 * and the only one defined in Skylake (processor tracing)
2515 * probably will block migration anyway.
2516 */
2517 .features[FEAT_XSAVE] =
2518 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2519 CPUID_XSAVE_XGETBV1,
2520 .features[FEAT_6_EAX] =
2521 CPUID_6_EAX_ARAT,
2522 .xlevel = 0x80000008,
2523 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2524 },
2525 {
2526 .name = "Cascadelake-Server",
2527 .level = 0xd,
2528 .vendor = CPUID_VENDOR_INTEL,
2529 .family = 6,
2530 .model = 85,
2531 .stepping = 6,
2532 .features[FEAT_1_EDX] =
2533 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2534 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2535 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2536 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2537 CPUID_DE | CPUID_FP87,
2538 .features[FEAT_1_ECX] =
2539 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2540 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2541 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2542 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2543 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2544 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2545 .features[FEAT_8000_0001_EDX] =
2546 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2547 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2548 .features[FEAT_8000_0001_ECX] =
2549 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2550 .features[FEAT_7_0_EBX] =
2551 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2552 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2553 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2554 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2555 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2556 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2557 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2558 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2559 .features[FEAT_7_0_ECX] =
2560 CPUID_7_0_ECX_PKU |
2561 CPUID_7_0_ECX_AVX512VNNI,
2562 .features[FEAT_7_0_EDX] =
2563 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2564 /* Missing: XSAVES (not supported by some Linux versions,
2565 * including v4.1 to v4.12).
2566 * KVM doesn't yet expose any XSAVES state save component,
2567 * and the only one defined in Skylake (processor tracing)
2568 * probably will block migration anyway.
2569 */
2570 .features[FEAT_XSAVE] =
2571 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2572 CPUID_XSAVE_XGETBV1,
2573 .features[FEAT_6_EAX] =
2574 CPUID_6_EAX_ARAT,
2575 .xlevel = 0x80000008,
2576 .model_id = "Intel Xeon Processor (Cascadelake)",
2577 },
2578 {
2579 .name = "Icelake-Client",
2580 .level = 0xd,
2581 .vendor = CPUID_VENDOR_INTEL,
2582 .family = 6,
2583 .model = 126,
2584 .stepping = 0,
2585 .features[FEAT_1_EDX] =
2586 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2587 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2588 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2589 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2590 CPUID_DE | CPUID_FP87,
2591 .features[FEAT_1_ECX] =
2592 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2593 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2594 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2595 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2596 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2597 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2598 .features[FEAT_8000_0001_EDX] =
2599 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2600 CPUID_EXT2_SYSCALL,
2601 .features[FEAT_8000_0001_ECX] =
2602 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2603 .features[FEAT_8000_0008_EBX] =
2604 CPUID_8000_0008_EBX_WBNOINVD,
2605 .features[FEAT_7_0_EBX] =
2606 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2607 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2608 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2609 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2610 CPUID_7_0_EBX_SMAP,
2611 .features[FEAT_7_0_ECX] =
2612 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2613 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2614 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2615 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2616 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2617 .features[FEAT_7_0_EDX] =
2618 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2619 /* Missing: XSAVES (not supported by some Linux versions,
2620 * including v4.1 to v4.12).
2621 * KVM doesn't yet expose any XSAVES state save component,
2622 * and the only one defined in Skylake (processor tracing)
2623 * probably will block migration anyway.
2624 */
2625 .features[FEAT_XSAVE] =
2626 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2627 CPUID_XSAVE_XGETBV1,
2628 .features[FEAT_6_EAX] =
2629 CPUID_6_EAX_ARAT,
2630 .xlevel = 0x80000008,
2631 .model_id = "Intel Core Processor (Icelake)",
2632 },
2633 {
2634 .name = "Icelake-Server",
2635 .level = 0xd,
2636 .vendor = CPUID_VENDOR_INTEL,
2637 .family = 6,
2638 .model = 134,
2639 .stepping = 0,
2640 .features[FEAT_1_EDX] =
2641 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2642 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2643 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2644 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2645 CPUID_DE | CPUID_FP87,
2646 .features[FEAT_1_ECX] =
2647 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2648 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2649 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2650 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2651 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2652 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2653 .features[FEAT_8000_0001_EDX] =
2654 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2655 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2656 .features[FEAT_8000_0001_ECX] =
2657 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2658 .features[FEAT_8000_0008_EBX] =
2659 CPUID_8000_0008_EBX_WBNOINVD,
2660 .features[FEAT_7_0_EBX] =
2661 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2662 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2663 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2664 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2665 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2666 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2667 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2668 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2669 .features[FEAT_7_0_ECX] =
2670 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2671 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2672 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2673 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2674 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
2675 .features[FEAT_7_0_EDX] =
2676 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2677 /* Missing: XSAVES (not supported by some Linux versions,
2678 * including v4.1 to v4.12).
2679 * KVM doesn't yet expose any XSAVES state save component,
2680 * and the only one defined in Skylake (processor tracing)
2681 * probably will block migration anyway.
2682 */
2683 .features[FEAT_XSAVE] =
2684 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2685 CPUID_XSAVE_XGETBV1,
2686 .features[FEAT_6_EAX] =
2687 CPUID_6_EAX_ARAT,
2688 .xlevel = 0x80000008,
2689 .model_id = "Intel Xeon Processor (Icelake)",
2690 },
2691 {
2692 .name = "SnowRidge-Server",
2693 .level = 27,
2694 .vendor = CPUID_VENDOR_INTEL,
2695 .family = 6,
2696 .model = 134,
2697 .stepping = 1,
2698 .features[FEAT_1_EDX] =
2699 /* missing: CPUID_PN, CPUID_IA64 */
2700 /* missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
2701 CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE |
2702 CPUID_TSC | CPUID_MSR | CPUID_PAE | CPUID_MCE |
2703 CPUID_CX8 | CPUID_APIC | CPUID_SEP |
2704 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV |
2705 CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH |
2706 CPUID_MMX |
2707 CPUID_FXSR | CPUID_SSE | CPUID_SSE2,
2708 .features[FEAT_1_ECX] =
2709 CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_MONITOR |
2710 CPUID_EXT_VMX |
2711 CPUID_EXT_SSSE3 |
2712 CPUID_EXT_CX16 |
2713 CPUID_EXT_SSE41 |
2714 CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE |
2715 CPUID_EXT_POPCNT |
2716 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | CPUID_EXT_XSAVE |
2717 CPUID_EXT_RDRAND,
2718 .features[FEAT_8000_0001_EDX] =
2719 CPUID_EXT2_SYSCALL |
2720 CPUID_EXT2_NX |
2721 CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2722 CPUID_EXT2_LM,
2723 .features[FEAT_8000_0001_ECX] =
2724 CPUID_EXT3_LAHF_LM |
2725 CPUID_EXT3_3DNOWPREFETCH,
2726 .features[FEAT_7_0_EBX] =
2727 CPUID_7_0_EBX_FSGSBASE |
2728 CPUID_7_0_EBX_SMEP |
2729 CPUID_7_0_EBX_ERMS |
2730 CPUID_7_0_EBX_MPX | /* missing bits 13, 15 */
2731 CPUID_7_0_EBX_RDSEED |
2732 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2733 CPUID_7_0_EBX_CLWB |
2734 CPUID_7_0_EBX_SHA_NI,
2735 .features[FEAT_7_0_ECX] =
2736 CPUID_7_0_ECX_UMIP |
2737 /* missing bit 5 */
2738 CPUID_7_0_ECX_GFNI |
2739 CPUID_7_0_ECX_MOVDIRI | CPUID_7_0_ECX_CLDEMOTE |
2740 CPUID_7_0_ECX_MOVDIR64B,
2741 .features[FEAT_7_0_EDX] =
2742 CPUID_7_0_EDX_SPEC_CTRL |
2743 CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD |
2744 CPUID_7_0_EDX_CORE_CAPABILITY,
2745 .features[FEAT_CORE_CAPABILITY] =
2746 MSR_CORE_CAP_SPLIT_LOCK_DETECT,
2747 /*
2748 * Missing: XSAVES (not supported by some Linux versions,
2749 * including v4.1 to v4.12).
2750 * KVM doesn't yet expose any XSAVES state save component,
2751 * and the only one defined in Skylake (processor tracing)
2752 * probably will block migration anyway.
2753 */
2754 .features[FEAT_XSAVE] =
2755 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2756 CPUID_XSAVE_XGETBV1,
2757 .features[FEAT_6_EAX] =
2758 CPUID_6_EAX_ARAT,
2759 .xlevel = 0x80000008,
2760 .model_id = "Intel Atom Processor (SnowRidge)",
2761 },
2762 {
2763 .name = "KnightsMill",
2764 .level = 0xd,
2765 .vendor = CPUID_VENDOR_INTEL,
2766 .family = 6,
2767 .model = 133,
2768 .stepping = 0,
2769 .features[FEAT_1_EDX] =
2770 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2771 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2772 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2773 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2774 CPUID_PSE | CPUID_DE | CPUID_FP87,
2775 .features[FEAT_1_ECX] =
2776 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2777 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2778 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2779 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2780 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2781 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2782 .features[FEAT_8000_0001_EDX] =
2783 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2784 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2785 .features[FEAT_8000_0001_ECX] =
2786 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2787 .features[FEAT_7_0_EBX] =
2788 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2789 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2790 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2791 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2792 CPUID_7_0_EBX_AVX512ER,
2793 .features[FEAT_7_0_ECX] =
2794 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2795 .features[FEAT_7_0_EDX] =
2796 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2797 .features[FEAT_XSAVE] =
2798 CPUID_XSAVE_XSAVEOPT,
2799 .features[FEAT_6_EAX] =
2800 CPUID_6_EAX_ARAT,
2801 .xlevel = 0x80000008,
2802 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2803 },
2804 {
2805 .name = "Opteron_G1",
2806 .level = 5,
2807 .vendor = CPUID_VENDOR_AMD,
2808 .family = 15,
2809 .model = 6,
2810 .stepping = 1,
2811 .features[FEAT_1_EDX] =
2812 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2813 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2814 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2815 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2816 CPUID_DE | CPUID_FP87,
2817 .features[FEAT_1_ECX] =
2818 CPUID_EXT_SSE3,
2819 .features[FEAT_8000_0001_EDX] =
2820 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2821 .xlevel = 0x80000008,
2822 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2823 },
2824 {
2825 .name = "Opteron_G2",
2826 .level = 5,
2827 .vendor = CPUID_VENDOR_AMD,
2828 .family = 15,
2829 .model = 6,
2830 .stepping = 1,
2831 .features[FEAT_1_EDX] =
2832 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2833 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2834 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2835 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2836 CPUID_DE | CPUID_FP87,
2837 .features[FEAT_1_ECX] =
2838 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2839 .features[FEAT_8000_0001_EDX] =
2840 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2841 .features[FEAT_8000_0001_ECX] =
2842 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2843 .xlevel = 0x80000008,
2844 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2845 },
2846 {
2847 .name = "Opteron_G3",
2848 .level = 5,
2849 .vendor = CPUID_VENDOR_AMD,
2850 .family = 16,
2851 .model = 2,
2852 .stepping = 3,
2853 .features[FEAT_1_EDX] =
2854 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2855 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2856 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2857 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2858 CPUID_DE | CPUID_FP87,
2859 .features[FEAT_1_ECX] =
2860 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2861 CPUID_EXT_SSE3,
2862 .features[FEAT_8000_0001_EDX] =
2863 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
2864 CPUID_EXT2_RDTSCP,
2865 .features[FEAT_8000_0001_ECX] =
2866 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2867 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2868 .xlevel = 0x80000008,
2869 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2870 },
2871 {
2872 .name = "Opteron_G4",
2873 .level = 0xd,
2874 .vendor = CPUID_VENDOR_AMD,
2875 .family = 21,
2876 .model = 1,
2877 .stepping = 2,
2878 .features[FEAT_1_EDX] =
2879 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2880 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2881 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2882 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2883 CPUID_DE | CPUID_FP87,
2884 .features[FEAT_1_ECX] =
2885 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2886 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2887 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2888 CPUID_EXT_SSE3,
2889 .features[FEAT_8000_0001_EDX] =
2890 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2891 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2892 .features[FEAT_8000_0001_ECX] =
2893 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2894 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2895 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2896 CPUID_EXT3_LAHF_LM,
2897 .features[FEAT_SVM] =
2898 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2899 /* no xsaveopt! */
2900 .xlevel = 0x8000001A,
2901 .model_id = "AMD Opteron 62xx class CPU",
2902 },
2903 {
2904 .name = "Opteron_G5",
2905 .level = 0xd,
2906 .vendor = CPUID_VENDOR_AMD,
2907 .family = 21,
2908 .model = 2,
2909 .stepping = 0,
2910 .features[FEAT_1_EDX] =
2911 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2912 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2913 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2914 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2915 CPUID_DE | CPUID_FP87,
2916 .features[FEAT_1_ECX] =
2917 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2918 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2919 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2920 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2921 .features[FEAT_8000_0001_EDX] =
2922 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2923 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2924 .features[FEAT_8000_0001_ECX] =
2925 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2926 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2927 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2928 CPUID_EXT3_LAHF_LM,
2929 .features[FEAT_SVM] =
2930 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2931 /* no xsaveopt! */
2932 .xlevel = 0x8000001A,
2933 .model_id = "AMD Opteron 63xx class CPU",
2934 },
2935 {
2936 .name = "EPYC",
2937 .level = 0xd,
2938 .vendor = CPUID_VENDOR_AMD,
2939 .family = 23,
2940 .model = 1,
2941 .stepping = 2,
2942 .features[FEAT_1_EDX] =
2943 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2944 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2945 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2946 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2947 CPUID_VME | CPUID_FP87,
2948 .features[FEAT_1_ECX] =
2949 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2950 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2951 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2952 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2953 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2954 .features[FEAT_8000_0001_EDX] =
2955 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2956 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2957 CPUID_EXT2_SYSCALL,
2958 .features[FEAT_8000_0001_ECX] =
2959 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2960 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2961 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2962 CPUID_EXT3_TOPOEXT,
2963 .features[FEAT_7_0_EBX] =
2964 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2965 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2966 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2967 CPUID_7_0_EBX_SHA_NI,
2968 /* Missing: XSAVES (not supported by some Linux versions,
2969 * including v4.1 to v4.12).
2970 * KVM doesn't yet expose any XSAVES state save component.
2971 */
2972 .features[FEAT_XSAVE] =
2973 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2974 CPUID_XSAVE_XGETBV1,
2975 .features[FEAT_6_EAX] =
2976 CPUID_6_EAX_ARAT,
2977 .features[FEAT_SVM] =
2978 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2979 .xlevel = 0x8000001E,
2980 .model_id = "AMD EPYC Processor",
2981 .cache_info = &epyc_cache_info,
2982 },
2983 {
2984 .name = "EPYC-IBPB",
2985 .level = 0xd,
2986 .vendor = CPUID_VENDOR_AMD,
2987 .family = 23,
2988 .model = 1,
2989 .stepping = 2,
2990 .features[FEAT_1_EDX] =
2991 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2992 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2993 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2994 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2995 CPUID_VME | CPUID_FP87,
2996 .features[FEAT_1_ECX] =
2997 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2998 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2999 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3000 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3001 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
3002 .features[FEAT_8000_0001_EDX] =
3003 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3004 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3005 CPUID_EXT2_SYSCALL,
3006 .features[FEAT_8000_0001_ECX] =
3007 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3008 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3009 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3010 CPUID_EXT3_TOPOEXT,
3011 .features[FEAT_8000_0008_EBX] =
3012 CPUID_8000_0008_EBX_IBPB,
3013 .features[FEAT_7_0_EBX] =
3014 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3015 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3016 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
3017 CPUID_7_0_EBX_SHA_NI,
3018 /* Missing: XSAVES (not supported by some Linux versions,
3019 * including v4.1 to v4.12).
3020 * KVM doesn't yet expose any XSAVES state save component.
3021 */
3022 .features[FEAT_XSAVE] =
3023 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3024 CPUID_XSAVE_XGETBV1,
3025 .features[FEAT_6_EAX] =
3026 CPUID_6_EAX_ARAT,
3027 .features[FEAT_SVM] =
3028 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3029 .xlevel = 0x8000001E,
3030 .model_id = "AMD EPYC Processor (with IBPB)",
3031 .cache_info = &epyc_cache_info,
3032 },
3033 {
3034 .name = "Dhyana",
3035 .level = 0xd,
3036 .vendor = CPUID_VENDOR_HYGON,
3037 .family = 24,
3038 .model = 0,
3039 .stepping = 1,
3040 .features[FEAT_1_EDX] =
3041 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
3042 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
3043 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
3044 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
3045 CPUID_VME | CPUID_FP87,
3046 .features[FEAT_1_ECX] =
3047 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
3048 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
3049 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
3050 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
3051 CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
3052 .features[FEAT_8000_0001_EDX] =
3053 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
3054 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
3055 CPUID_EXT2_SYSCALL,
3056 .features[FEAT_8000_0001_ECX] =
3057 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
3058 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
3059 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
3060 CPUID_EXT3_TOPOEXT,
3061 .features[FEAT_8000_0008_EBX] =
3062 CPUID_8000_0008_EBX_IBPB,
3063 .features[FEAT_7_0_EBX] =
3064 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
3065 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
3066 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
3067 /*
3068 * Missing: XSAVES (not supported by some Linux versions,
3069 * including v4.1 to v4.12).
3070 * KVM doesn't yet expose any XSAVES state save component.
3071 */
3072 .features[FEAT_XSAVE] =
3073 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3074 CPUID_XSAVE_XGETBV1,
3075 .features[FEAT_6_EAX] =
3076 CPUID_6_EAX_ARAT,
3077 .features[FEAT_SVM] =
3078 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3079 .xlevel = 0x8000001E,
3080 .model_id = "Hygon Dhyana Processor",
3081 .cache_info = &epyc_cache_info,
3082 },
3083 };
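/*
 * Note on the table above: each entry defines one named CPU model selectable
 * with "-cpu <name>".  The FEAT_* words correspond directly to CPUID register
 * outputs (e.g. FEAT_1_EDX is CPUID.01H:EDX and FEAT_7_0_EBX is
 * CPUID.(EAX=07H,ECX=0):EBX).  The "-IBRS"/"-IBPB" variants match their base
 * models except for the added speculation-control bits
 * (CPUID_7_0_EDX_SPEC_CTRL, CPUID_8000_0008_EBX_IBPB).
 */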
3084
3085 typedef struct PropValue {
3086 const char *prop, *value;
3087 } PropValue;
3088
3089 /* KVM-specific features that are automatically added/removed
3090 * from all CPU models when KVM is enabled.
3091 */
3092 static PropValue kvm_default_props[] = {
3093 { "kvmclock", "on" },
3094 { "kvm-nopiodelay", "on" },
3095 { "kvm-asyncpf", "on" },
3096 { "kvm-steal-time", "on" },
3097 { "kvm-pv-eoi", "on" },
3098 { "kvmclock-stable-bit", "on" },
3099 { "x2apic", "on" },
3100 { "acpi", "off" },
3101 { "monitor", "off" },
3102 { "svm", "off" },
3103 { NULL, NULL },
3104 };
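/*
 * Illustrative effect (a sketch, not stated by this table alone): with KVM
 * enabled, "-cpu Nehalem" still gets kvmclock, x2apic and the other "on"
 * defaults above, while acpi, monitor and svm are masked off; an explicit
 * user setting such as "-cpu Nehalem,-x2apic" is expected to win over these
 * defaults.
 */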
3105
3106 /* TCG-specific defaults that override all CPU models when using TCG
3107 */
3108 static PropValue tcg_default_props[] = {
3109 { "vme", "off" },
3110 { NULL, NULL },
3111 };
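/*
 * "vme" is forced off here because TCG does not emulate the virtual-8086
 * mode extensions; this is the presumed rationale rather than something
 * spelled out in this file.
 */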
3112
3113
3114 void x86_cpu_change_kvm_default(const char *prop, const char *value)
3115 {
3116 PropValue *pv;
3117 for (pv = kvm_default_props; pv->prop; pv++) {
3118 if (!strcmp(pv->prop, prop)) {
3119 pv->value = value;
3120 break;
3121 }
3122 }
3123
3124 /* This function must only be called for properties that are
3125 * already present in the kvm_default_props table.
3126 */
3127 assert(pv->prop);
3128 }
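/*
 * Usage sketch (hypothetical caller): machine or accelerator setup code can
 * adjust a default before CPU objects are created, e.g.
 *
 *     x86_cpu_change_kvm_default("x2apic", "off");
 *
 * Passing a property name that is not listed in kvm_default_props ends the
 * loop on the NULL terminator and trips the assert above.
 */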
3129
3130 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3131 bool migratable_only);
3132
3133 static bool lmce_supported(void)
3134 {
3135 uint64_t mce_cap = 0;
3136
3137 #ifdef CONFIG_KVM
3138 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
3139 return false;
3140 }
3141 #endif
3142
3143 return !!(mce_cap & MCG_LMCE_P);
3144 }
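/*
 * LMCE ("local" machine check exceptions) lets an MCE be delivered to a
 * single vCPU instead of being broadcast to all of them.  The helper above
 * just queries KVM's supported MCE capabilities and tests MCG_LMCE_P; when
 * built without CONFIG_KVM, mce_cap stays 0 and LMCE is reported as
 * unsupported.
 */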
3145
3146 #define CPUID_MODEL_ID_SZ 48
3147
3148 /**
3149 * cpu_x86_fill_model_id:
3150 * Get CPUID model ID string from host CPU.
3151 *
3152 * @str should have at least CPUID_MODEL_ID_SZ bytes
3153 *
3154 * The function does NOT add a null terminator to the string
3155 * automatically.
3156 */
3157 static int cpu_x86_fill_model_id(char *str)
3158 {
3159 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
3160 int i;
3161
3162 for (i = 0; i < 3; i++) {
3163 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
3164 memcpy(str + i * 16 + 0, &eax, 4);
3165 memcpy(str + i * 16 + 4, &ebx, 4);
3166 memcpy(str + i * 16 + 8, &ecx, 4);
3167 memcpy(str + i * 16 + 12, &edx, 4);
3168 }
3169 return 0;
3170 }
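/*
 * Background: CPUID leaves 0x80000002..0x80000004 each return 16 bytes of
 * the processor brand string in EAX/EBX/ECX/EDX, which is why three
 * iterations of 16 bytes fill the 48-byte (CPUID_MODEL_ID_SZ) buffer.  A
 * minimal caller keeps its own terminating NUL, e.g.:
 *
 *     char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
 *     cpu_x86_fill_model_id(model_id);
 *
 * as done in max_x86_cpu_initfn() below.
 */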
3171
3172 static Property max_x86_cpu_properties[] = {
3173 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
3174 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
3175 DEFINE_PROP_END_OF_LIST()
3176 };
3177
3178 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
3179 {
3180 DeviceClass *dc = DEVICE_CLASS(oc);
3181 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3182
3183 xcc->ordering = 9;
3184
3185 xcc->model_description =
3186 "Enables all features supported by the accelerator in the current host";
3187
3188 dc->props = max_x86_cpu_properties;
3189 }
3190
3191 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
3192
3193 static void max_x86_cpu_initfn(Object *obj)
3194 {
3195 X86CPU *cpu = X86_CPU(obj);
3196 CPUX86State *env = &cpu->env;
3197 KVMState *s = kvm_state;
3198
3199 /* We can't fill the features array here because we don't know yet if
3200 * "migratable" is true or false.
3201 */
3202 cpu->max_features = true;
3203
3204 if (accel_uses_host_cpuid()) {
3205 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
3206 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
3207 int family, model, stepping;
3208
3209 host_vendor_fms(vendor, &family, &model, &stepping);
3210 cpu_x86_fill_model_id(model_id);
3211
3212 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
3213 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
3214 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
3215 object_property_set_int(OBJECT(cpu), stepping, "stepping",
3216 &error_abort);
3217 object_property_set_str(OBJECT(cpu), model_id, "model-id",
3218 &error_abort);
3219
3220 if (kvm_enabled()) {
3221 env->cpuid_min_level =
3222 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
3223 env->cpuid_min_xlevel =
3224 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
3225 env->cpuid_min_xlevel2 =
3226 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
3227 } else {
3228 env->cpuid_min_level =
3229 hvf_get_supported_cpuid(0x0, 0, R_EAX);
3230 env->cpuid_min_xlevel =
3231 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
3232 env->cpuid_min_xlevel2 =
3233 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
3234 }
3235
3236 if (lmce_supported()) {
3237 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
3238 }
3239 } else {
3240 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
3241 "vendor", &error_abort);
3242 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
3243 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
3244 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
3245 object_property_set_str(OBJECT(cpu),
3246 "QEMU TCG CPU version " QEMU_HW_VERSION,
3247 "model-id", &error_abort);
3248 }
3249
3250 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
3251 }
3252
3253 static const TypeInfo max_x86_cpu_type_info = {
3254 .name = X86_CPU_TYPE_NAME("max"),
3255 .parent = TYPE_X86_CPU,
3256 .instance_init = max_x86_cpu_initfn,
3257 .class_init = max_x86_cpu_class_init,
3258 };
3259
3260 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
3261 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
3262 {
3263 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3264
3265 xcc->host_cpuid_required = true;
3266 xcc->ordering = 8;
3267
3268 #if defined(CONFIG_KVM)
3269 xcc->model_description =
3270 "KVM processor with all supported host features ";
3271 #elif defined(CONFIG_HVF)
3272 xcc->model_description =
3273 "HVF processor with all supported host features ";
3274 #endif
3275 }
3276
3277 static const TypeInfo host_x86_cpu_type_info = {
3278 .name = X86_CPU_TYPE_NAME("host"),
3279 .parent = X86_CPU_TYPE_NAME("max"),
3280 .class_init = host_x86_cpu_class_init,
3281 };
3282
3283 #endif
3284
3285 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
3286 {
3287 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
3288
3289 switch (f->type) {
3290 case CPUID_FEATURE_WORD:
3291 {
3292 const char *reg = get_register_name_32(f->cpuid.reg);
3293 assert(reg);
3294 return g_strdup_printf("CPUID.%02XH:%s",
3295 f->cpuid.eax, reg);
3296 }
3297 case MSR_FEATURE_WORD:
3298 return g_strdup_printf("MSR(%02XH)",
3299 f->msr.index);
3300 }
3301
3302 return NULL;
3303 }
3304
3305 static void report_unavailable_features(FeatureWord w, uint32_t mask)
3306 {
3307 FeatureWordInfo *f = &feature_word_info[w];
3308 int i;
3309 char *feat_word_str;
3310
3311 for (i = 0; i < 32; ++i) {
3312 if ((1UL << i) & mask) {
3313 feat_word_str = feature_word_description(f, i);
3314 warn_report("%s doesn't support requested feature: %s%s%s [bit %d]",
3315 accel_uses_host_cpuid() ? "host" : "TCG",
3316 feat_word_str,
3317 f->feat_names[i] ? "." : "",
3318 f->feat_names[i] ? f->feat_names[i] : "", i);
3319 g_free(feat_word_str);
3320 }
3321 }
3322 }
3323
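/*
 * CPUID[EAX=1].EAX packs the version as stepping[3:0], model[7:4],
 * family[11:8], extended model[19:16] and extended family[27:20].
 * Worked example: family 23 (0x17) is encoded as base family 0xF plus
 * extended family 0x8, and model 49 (0x31) as base model 0x1 plus
 * extended model 0x3; the getters and setters below apply exactly this
 * split.
 */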
3324 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
3325 const char *name, void *opaque,
3326 Error **errp)
3327 {
3328 X86CPU *cpu = X86_CPU(obj);
3329 CPUX86State *env = &cpu->env;
3330 int64_t value;
3331
3332 value = (env->cpuid_version >> 8) & 0xf;
3333 if (value == 0xf) {
3334 value += (env->cpuid_version >> 20) & 0xff;
3335 }
3336 visit_type_int(v, name, &value, errp);
3337 }
3338
3339 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
3340 const char *name, void *opaque,
3341 Error **errp)
3342 {
3343 X86CPU *cpu = X86_CPU(obj);
3344 CPUX86State *env = &cpu->env;
3345 const int64_t min = 0;
3346 const int64_t max = 0xff + 0xf;
3347 Error *local_err = NULL;
3348 int64_t value;
3349
3350 visit_type_int(v, name, &value, &local_err);
3351 if (local_err) {
3352 error_propagate(errp, local_err);
3353 return;
3354 }
3355 if (value < min || value > max) {
3356 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3357 name ? name : "null", value, min, max);
3358 return;
3359 }
3360
3361 env->cpuid_version &= ~0xff00f00;
3362 if (value > 0x0f) {
3363 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
3364 } else {
3365 env->cpuid_version |= value << 8;
3366 }
3367 }
3368
3369 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
3370 const char *name, void *opaque,
3371 Error **errp)
3372 {
3373 X86CPU *cpu = X86_CPU(obj);
3374 CPUX86State *env = &cpu->env;
3375 int64_t value;
3376
3377 value = (env->cpuid_version >> 4) & 0xf;
3378 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
3379 visit_type_int(v, name, &value, errp);
3380 }
3381
3382 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
3383 const char *name, void *opaque,
3384 Error **errp)
3385 {
3386 X86CPU *cpu = X86_CPU(obj);
3387 CPUX86State *env = &cpu->env;
3388 const int64_t min = 0;
3389 const int64_t max = 0xff;
3390 Error *local_err = NULL;
3391 int64_t value;
3392
3393 visit_type_int(v, name, &value, &local_err);
3394 if (local_err) {
3395 error_propagate(errp, local_err);
3396 return;
3397 }
3398 if (value < min || value > max) {
3399 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3400 name ? name : "null", value, min, max);
3401 return;
3402 }
3403
3404 env->cpuid_version &= ~0xf00f0;
3405 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
3406 }
3407
3408 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
3409 const char *name, void *opaque,
3410 Error **errp)
3411 {
3412 X86CPU *cpu = X86_CPU(obj);
3413 CPUX86State *env = &cpu->env;
3414 int64_t value;
3415
3416 value = env->cpuid_version & 0xf;
3417 visit_type_int(v, name, &value, errp);
3418 }
3419
3420 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
3421 const char *name, void *opaque,
3422 Error **errp)
3423 {
3424 X86CPU *cpu = X86_CPU(obj);
3425 CPUX86State *env = &cpu->env;
3426 const int64_t min = 0;
3427 const int64_t max = 0xf;
3428 Error *local_err = NULL;
3429 int64_t value;
3430
3431 visit_type_int(v, name, &value, &local_err);
3432 if (local_err) {
3433 error_propagate(errp, local_err);
3434 return;
3435 }
3436 if (value < min || value > max) {
3437 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3438 name ? name : "null", value, min, max);
3439 return;
3440 }
3441
3442 env->cpuid_version &= ~0xf;
3443 env->cpuid_version |= value & 0xf;
3444 }
3445
3446 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
3447 {
3448 X86CPU *cpu = X86_CPU(obj);
3449 CPUX86State *env = &cpu->env;
3450 char *value;
3451
3452 value = g_malloc(CPUID_VENDOR_SZ + 1);
3453 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
3454 env->cpuid_vendor3);
3455 return value;
3456 }
3457
3458 static void x86_cpuid_set_vendor(Object *obj, const char *value,
3459 Error **errp)
3460 {
3461 X86CPU *cpu = X86_CPU(obj);
3462 CPUX86State *env = &cpu->env;
3463 int i;
3464
3465 if (strlen(value) != CPUID_VENDOR_SZ) {
3466 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
3467 return;
3468 }
3469
3470 env->cpuid_vendor1 = 0;
3471 env->cpuid_vendor2 = 0;
3472 env->cpuid_vendor3 = 0;
3473 for (i = 0; i < 4; i++) {
3474 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3475 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3476 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3477 }
3478 }
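
/*
 * Worked example: the 12-character vendor string is packed into three
 * little-endian dwords, so "GenuineIntel" becomes cpuid_vendor1 =
 * 0x756e6547 ("Genu"), cpuid_vendor2 = 0x49656e69 ("ineI") and
 * cpuid_vendor3 = 0x6c65746e ("ntel"), matching the EBX/EDX/ECX values
 * a real Intel CPU reports for CPUID[EAX=0].
 */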
3479
3480 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3481 {
3482 X86CPU *cpu = X86_CPU(obj);
3483 CPUX86State *env = &cpu->env;
3484 char *value;
3485 int i;
3486
3487 value = g_malloc(48 + 1);
3488 for (i = 0; i < 48; i++) {
3489 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3490 }
3491 value[48] = '\0';
3492 return value;
3493 }
3494
3495 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3496 Error **errp)
3497 {
3498 X86CPU *cpu = X86_CPU(obj);
3499 CPUX86State *env = &cpu->env;
3500 int c, len, i;
3501
3502 if (model_id == NULL) {
3503 model_id = "";
3504 }
3505 len = strlen(model_id);
3506 memset(env->cpuid_model, 0, 48);
3507 for (i = 0; i < 48; i++) {
3508 if (i >= len) {
3509 c = '\0';
3510 } else {
3511 c = (uint8_t)model_id[i];
3512 }
3513 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3514 }
3515 }
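
/*
 * Worked example: a 10-character model-id such as "QEMU CPU X" occupies
 * cpuid_model[0..2] (four characters per 32-bit element, little-endian
 * byte order) and the remaining bytes up to 48 are NUL-padded; CPUID
 * leaves 0x80000002..0x80000004 below return these words verbatim.
 */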
3516
3517 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3518 void *opaque, Error **errp)
3519 {
3520 X86CPU *cpu = X86_CPU(obj);
3521 int64_t value;
3522
3523 value = cpu->env.tsc_khz * 1000;
3524 visit_type_int(v, name, &value, errp);
3525 }
3526
3527 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3528 void *opaque, Error **errp)
3529 {
3530 X86CPU *cpu = X86_CPU(obj);
3531 const int64_t min = 0;
3532 const int64_t max = INT64_MAX;
3533 Error *local_err = NULL;
3534 int64_t value;
3535
3536 visit_type_int(v, name, &value, &local_err);
3537 if (local_err) {
3538 error_propagate(errp, local_err);
3539 return;
3540 }
3541 if (value < min || value > max) {
3542 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3543 name ? name : "null", value, min, max);
3544 return;
3545 }
3546
3547 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
3548 }
3549
3550 /* Generic getter for "feature-words" and "filtered-features" properties */
3551 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
3552 const char *name, void *opaque,
3553 Error **errp)
3554 {
3555 uint32_t *array = (uint32_t *)opaque;
3556 FeatureWord w;
3557 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
3558 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
3559 X86CPUFeatureWordInfoList *list = NULL;
3560
3561 for (w = 0; w < FEATURE_WORDS; w++) {
3562 FeatureWordInfo *wi = &feature_word_info[w];
3563 /*
3564 * We didn't have MSR features when "feature-words" was
3565 * introduced. Therefore, skip entries of any other type here.
3566 */
3567 if (wi->type != CPUID_FEATURE_WORD) {
3568 continue;
3569 }
3570 X86CPUFeatureWordInfo *qwi = &word_infos[w];
3571 qwi->cpuid_input_eax = wi->cpuid.eax;
3572 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
3573 qwi->cpuid_input_ecx = wi->cpuid.ecx;
3574 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
3575 qwi->features = array[w];
3576
3577 /* List will be in reverse order, but order shouldn't matter */
3578 list_entries[w].next = list;
3579 list_entries[w].value = &word_infos[w];
3580 list = &list_entries[w];
3581 }
3582
3583 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
3584 }
3585
3586 /* Convert all '_' in a feature string option name to '-', to make the feature
3587 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
3588 */
3589 static inline void feat2prop(char *s)
3590 {
3591 while ((s = strchr(s, '_'))) {
3592 *s = '-';
3593 }
3594 }
3595
3596 /* Return the feature property name for a feature flag bit */
3597 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
3598 {
3599 /* XSAVE components are automatically enabled by other features,
3600 * so return the original feature name instead
3601 */
3602 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
3603 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
3604
3605 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
3606 x86_ext_save_areas[comp].bits) {
3607 w = x86_ext_save_areas[comp].feature;
3608 bitnr = ctz32(x86_ext_save_areas[comp].bits);
3609 }
3610 }
3611
3612 assert(bitnr < 32);
3613 assert(w < FEATURE_WORDS);
3614 return feature_word_info[w].feat_names[bitnr];
3615 }
3616
3617 /* Compatibility hack to maintain the legacy +-feat semantics,
3618 * where +-feat overwrites any feature set by
3619 * feat=on|feat even if the latter is parsed after +-feat
3620 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
3621 */
3622 static GList *plus_features, *minus_features;
3623
3624 static gint compare_string(gconstpointer a, gconstpointer b)
3625 {
3626 return g_strcmp0(a, b);
3627 }
3628
3629 /* Parse "+feature,-feature,feature=foo" CPU feature string
3630 */
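/*
 * For illustration (a hypothetical command line; any CPU model and
 * feature names defined in this file could be substituted):
 *
 *   -cpu Skylake-Client,+avx2,-vmx,tsc-freq=2.5G
 *
 * "+avx2" and "-vmx" use the legacy plus/minus syntax handled below,
 * while "tsc-freq" is the special case that gets converted to the
 * "tsc-frequency" property.
 */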
3631 static void x86_cpu_parse_featurestr(const char *typename, char *features,
3632 Error **errp)
3633 {
3634 char *featurestr; /* Single "key=value" string being parsed */
3635 static bool cpu_globals_initialized;
3636 bool ambiguous = false;
3637
3638 if (cpu_globals_initialized) {
3639 return;
3640 }
3641 cpu_globals_initialized = true;
3642
3643 if (!features) {
3644 return;
3645 }
3646
3647 for (featurestr = strtok(features, ",");
3648 featurestr;
3649 featurestr = strtok(NULL, ",")) {
3650 const char *name;
3651 const char *val = NULL;
3652 char *eq = NULL;
3653 char num[32];
3654 GlobalProperty *prop;
3655
3656 /* Compatibility syntax: */
3657 if (featurestr[0] == '+') {
3658 plus_features = g_list_append(plus_features,
3659 g_strdup(featurestr + 1));
3660 continue;
3661 } else if (featurestr[0] == '-') {
3662 minus_features = g_list_append(minus_features,
3663 g_strdup(featurestr + 1));
3664 continue;
3665 }
3666
3667 eq = strchr(featurestr, '=');
3668 if (eq) {
3669 *eq++ = 0;
3670 val = eq;
3671 } else {
3672 val = "on";
3673 }
3674
3675 feat2prop(featurestr);
3676 name = featurestr;
3677
3678 if (g_list_find_custom(plus_features, name, compare_string)) {
3679 warn_report("Ambiguous CPU model string. "
3680 "Don't mix both \"+%s\" and \"%s=%s\"",
3681 name, name, val);
3682 ambiguous = true;
3683 }
3684 if (g_list_find_custom(minus_features, name, compare_string)) {
3685 warn_report("Ambiguous CPU model string. "
3686 "Don't mix both \"-%s\" and \"%s=%s\"",
3687 name, name, val);
3688 ambiguous = true;
3689 }
3690
3691 /* Special case: */
3692 if (!strcmp(name, "tsc-freq")) {
3693 int ret;
3694 uint64_t tsc_freq;
3695
3696 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
3697 if (ret < 0 || tsc_freq > INT64_MAX) {
3698 error_setg(errp, "bad numerical value %s", val);
3699 return;
3700 }
3701 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
3702 val = num;
3703 name = "tsc-frequency";
3704 }
3705
3706 prop = g_new0(typeof(*prop), 1);
3707 prop->driver = typename;
3708 prop->property = g_strdup(name);
3709 prop->value = g_strdup(val);
3710 qdev_prop_register_global(prop);
3711 }
3712
3713 if (ambiguous) {
3714 warn_report("Compatibility of ambiguous CPU model "
3715 "strings won't be kept on future QEMU versions");
3716 }
3717 }
3718
3719 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3720 static int x86_cpu_filter_features(X86CPU *cpu);
3721
3722 /* Build a list with the names of all features in a feature word array */
3723 static void x86_cpu_list_feature_names(FeatureWordArray features,
3724 strList **feat_names)
3725 {
3726 FeatureWord w;
3727 strList **next = feat_names;
3728
3729 for (w = 0; w < FEATURE_WORDS; w++) {
3730 uint32_t filtered = features[w];
3731 int i;
3732 for (i = 0; i < 32; i++) {
3733 if (filtered & (1UL << i)) {
3734 strList *new = g_new0(strList, 1);
3735 new->value = g_strdup(x86_cpu_feature_name(w, i));
3736 *next = new;
3737 next = &new->next;
3738 }
3739 }
3740 }
3741 }
3742
3743 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
3744 const char *name, void *opaque,
3745 Error **errp)
3746 {
3747 X86CPU *xc = X86_CPU(obj);
3748 strList *result = NULL;
3749
3750 x86_cpu_list_feature_names(xc->filtered_features, &result);
3751 visit_type_strList(v, "unavailable-features", &result, errp);
3752 }
3753
3754 /* Check for missing features that may prevent the CPU class from
3755 * running using the current machine and accelerator.
3756 */
3757 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3758 strList **missing_feats)
3759 {
3760 X86CPU *xc;
3761 Error *err = NULL;
3762 strList **next = missing_feats;
3763
3764 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3765 strList *new = g_new0(strList, 1);
3766 new->value = g_strdup("kvm");
3767 *missing_feats = new;
3768 return;
3769 }
3770
3771 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3772
3773 x86_cpu_expand_features(xc, &err);
3774 if (err) {
3775 /* Errors at x86_cpu_expand_features should never happen,
3776 * but in case they do, just report the model as not
3777 * runnable at all using the "type" property.
3778 */
3779 strList *new = g_new0(strList, 1);
3780 new->value = g_strdup("type");
3781 *next = new;
3782 next = &new->next;
3783 }
3784
3785 x86_cpu_filter_features(xc);
3786
3787 x86_cpu_list_feature_names(xc->filtered_features, next);
3788
3789 object_unref(OBJECT(xc));
3790 }
3791
3792 /* Print all CPUID feature names in the given list
3793 */
3794 static void listflags(GList *features)
3795 {
3796 size_t len = 0;
3797 GList *tmp;
3798
3799 for (tmp = features; tmp; tmp = tmp->next) {
3800 const char *name = tmp->data;
3801 if ((len + strlen(name) + 1) >= 75) {
3802 qemu_printf("\n");
3803 len = 0;
3804 }
3805 qemu_printf("%s%s", len == 0 ? "  " : " ", name);
3806 len += strlen(name) + 1;
3807 }
3808 qemu_printf("\n");
3809 }
3810
3811 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3812 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3813 {
3814 ObjectClass *class_a = (ObjectClass *)a;
3815 ObjectClass *class_b = (ObjectClass *)b;
3816 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3817 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3818 char *name_a, *name_b;
3819 int ret;
3820
3821 if (cc_a->ordering != cc_b->ordering) {
3822 ret = cc_a->ordering - cc_b->ordering;
3823 } else {
3824 name_a = x86_cpu_class_get_model_name(cc_a);
3825 name_b = x86_cpu_class_get_model_name(cc_b);
3826 ret = strcmp(name_a, name_b);
3827 g_free(name_a);
3828 g_free(name_b);
3829 }
3830 return ret;
3831 }
3832
3833 static GSList *get_sorted_cpu_model_list(void)
3834 {
3835 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3836 list = g_slist_sort(list, x86_cpu_list_compare);
3837 return list;
3838 }
3839
3840 static char *x86_cpu_class_get_model_id(X86CPUClass *xc)
3841 {
3842 Object *obj = object_new(object_class_get_name(OBJECT_CLASS(xc)));
3843 char *r = object_property_get_str(obj, "model-id", &error_abort);
3844 object_unref(obj);
3845 return r;
3846 }
3847
3848 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3849 {
3850 ObjectClass *oc = data;
3851 X86CPUClass *cc = X86_CPU_CLASS(oc);
3852 char *name = x86_cpu_class_get_model_name(cc);
3853 char *desc = g_strdup(cc->model_description);
3854
3855 if (!desc) {
3856 desc = x86_cpu_class_get_model_id(cc);
3857 }
3858
3859 qemu_printf("x86 %-20s %-48s\n", name, desc);
3860 g_free(name);
3861 g_free(desc);
3862 }
3863
3864 /* list available CPU models and flags */
3865 void x86_cpu_list(void)
3866 {
3867 int i, j;
3868 GSList *list;
3869 GList *names = NULL;
3870
3871 qemu_printf("Available CPUs:\n");
3872 list = get_sorted_cpu_model_list();
3873 g_slist_foreach(list, x86_cpu_list_entry, NULL);
3874 g_slist_free(list);
3875
3876 names = NULL;
3877 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3878 FeatureWordInfo *fw = &feature_word_info[i];
3879 for (j = 0; j < 32; j++) {
3880 if (fw->feat_names[j]) {
3881 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3882 }
3883 }
3884 }
3885
3886 names = g_list_sort(names, (GCompareFunc)strcmp);
3887
3888 qemu_printf("\nRecognized CPUID flags:\n");
3889 listflags(names);
3890 qemu_printf("\n");
3891 g_list_free(names);
3892 }
3893
3894 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3895 {
3896 ObjectClass *oc = data;
3897 X86CPUClass *cc = X86_CPU_CLASS(oc);
3898 CpuDefinitionInfoList **cpu_list = user_data;
3899 CpuDefinitionInfoList *entry;
3900 CpuDefinitionInfo *info;
3901
3902 info = g_malloc0(sizeof(*info));
3903 info->name = x86_cpu_class_get_model_name(cc);
3904 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3905 info->has_unavailable_features = true;
3906 info->q_typename = g_strdup(object_class_get_name(oc));
3907 info->migration_safe = cc->migration_safe;
3908 info->has_migration_safe = true;
3909 info->q_static = cc->static_model;
3910
3911 entry = g_malloc0(sizeof(*entry));
3912 entry->value = info;
3913 entry->next = *cpu_list;
3914 *cpu_list = entry;
3915 }
3916
3917 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
3918 {
3919 CpuDefinitionInfoList *cpu_list = NULL;
3920 GSList *list = get_sorted_cpu_model_list();
3921 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3922 g_slist_free(list);
3923 return cpu_list;
3924 }
3925
3926 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3927 bool migratable_only)
3928 {
3929 FeatureWordInfo *wi = &feature_word_info[w];
3930 uint32_t r = 0;
3931
3932 if (kvm_enabled()) {
3933 switch (wi->type) {
3934 case CPUID_FEATURE_WORD:
3935 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
3936 wi->cpuid.ecx,
3937 wi->cpuid.reg);
3938 break;
3939 case MSR_FEATURE_WORD:
3940 r = kvm_arch_get_supported_msr_feature(kvm_state,
3941 wi->msr.index);
3942 break;
3943 }
3944 } else if (hvf_enabled()) {
3945 if (wi->type != CPUID_FEATURE_WORD) {
3946 return 0;
3947 }
3948 r = hvf_get_supported_cpuid(wi->cpuid.eax,
3949 wi->cpuid.ecx,
3950 wi->cpuid.reg);
3951 } else if (tcg_enabled()) {
3952 r = wi->tcg_features;
3953 } else {
3954 return ~0;
3955 }
3956 if (migratable_only) {
3957 r &= x86_cpu_get_migratable_flags(w);
3958 }
3959 return r;
3960 }
3961
3962 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3963 {
3964 FeatureWord w;
3965
3966 for (w = 0; w < FEATURE_WORDS; w++) {
3967 report_unavailable_features(w, cpu->filtered_features[w]);
3968 }
3969 }
3970
3971 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3972 {
3973 PropValue *pv;
3974 for (pv = props; pv->prop; pv++) {
3975 if (!pv->value) {
3976 continue;
3977 }
3978 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3979 &error_abort);
3980 }
3981 }
3982
3983 /* Load data from an X86CPUDefinition into an X86CPU object
3984 */
3985 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
3986 {
3987 CPUX86State *env = &cpu->env;
3988 const char *vendor;
3989 char host_vendor[CPUID_VENDOR_SZ + 1];
3990 FeatureWord w;
3991
3992 /* NOTE: any property set by this function should be returned by
3993 * x86_cpu_static_props(), so static expansion of
3994 * query-cpu-model-expansion is always complete.
3995 */
3996
3997 /* CPU models only set _minimum_ values for level/xlevel: */
3998 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
3999 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
4000
4001 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
4002 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
4003 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
4004 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
4005 for (w = 0; w < FEATURE_WORDS; w++) {
4006 env->features[w] = def->features[w];
4007 }
4008
4009 /* legacy-cache defaults to 'off' if CPU model provides cache info */
4010 cpu->legacy_cache = !def->cache_info;
4011
4012 /* Special cases not set in the X86CPUDefinition structs: */
4013 /* TODO: in-kernel irqchip for hvf */
4014 if (kvm_enabled()) {
4015 if (!kvm_irqchip_in_kernel()) {
4016 x86_cpu_change_kvm_default("x2apic", "off");
4017 }
4018
4019 x86_cpu_apply_props(cpu, kvm_default_props);
4020 } else if (tcg_enabled()) {
4021 x86_cpu_apply_props(cpu, tcg_default_props);
4022 }
4023
4024 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
4025
4026 /* sysenter isn't supported in compatibility mode on AMD,
4027 * syscall isn't supported in compatibility mode on Intel.
4028 * Normally we advertise the actual CPU vendor, but you can
4029 * override this using the 'vendor' property if you want to use
4030 * KVM's sysenter/syscall emulation in compatibility mode and
4031 * when doing cross-vendor migration.
4032 */
4033 vendor = def->vendor;
4034 if (accel_uses_host_cpuid()) {
4035 uint32_t ebx = 0, ecx = 0, edx = 0;
4036 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
4037 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
4038 vendor = host_vendor;
4039 }
4040
4041 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
4042
4043 }
4044
4045 #ifndef CONFIG_USER_ONLY
4046 /* Return a QDict containing keys for all properties that can be included
4047 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
4048 * must be included in the dictionary.
4049 */
4050 static QDict *x86_cpu_static_props(void)
4051 {
4052 FeatureWord w;
4053 int i;
4054 static const char *props[] = {
4055 "min-level",
4056 "min-xlevel",
4057 "family",
4058 "model",
4059 "stepping",
4060 "model-id",
4061 "vendor",
4062 "lmce",
4063 NULL,
4064 };
4065 static QDict *d;
4066
4067 if (d) {
4068 return d;
4069 }
4070
4071 d = qdict_new();
4072 for (i = 0; props[i]; i++) {
4073 qdict_put_null(d, props[i]);
4074 }
4075
4076 for (w = 0; w < FEATURE_WORDS; w++) {
4077 FeatureWordInfo *fi = &feature_word_info[w];
4078 int bit;
4079 for (bit = 0; bit < 32; bit++) {
4080 if (!fi->feat_names[bit]) {
4081 continue;
4082 }
4083 qdict_put_null(d, fi->feat_names[bit]);
4084 }
4085 }
4086
4087 return d;
4088 }
4089
4090 /* Add an entry to the @props dict, with the value of the given property. */
4091 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
4092 {
4093 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
4094 &error_abort);
4095
4096 qdict_put_obj(props, prop, value);
4097 }
4098
4099 /* Convert CPU model data from X86CPU object to a property dictionary
4100 * that can recreate exactly the same CPU model.
4101 */
4102 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
4103 {
4104 QDict *sprops = x86_cpu_static_props();
4105 const QDictEntry *e;
4106
4107 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
4108 const char *prop = qdict_entry_key(e);
4109 x86_cpu_expand_prop(cpu, props, prop);
4110 }
4111 }
4112
4113 /* Convert CPU model data from X86CPU object to a property dictionary
4114 * that can recreate exactly the same CPU model, including every
4115 * writeable QOM property.
4116 */
4117 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
4118 {
4119 ObjectPropertyIterator iter;
4120 ObjectProperty *prop;
4121
4122 object_property_iter_init(&iter, OBJECT(cpu));
4123 while ((prop = object_property_iter_next(&iter))) {
4124 /* skip read-only or write-only properties */
4125 if (!prop->get || !prop->set) {
4126 continue;
4127 }
4128
4129 /* "hotplugged" is the only property that is configurable
4130 * on the command-line but will be set differently on CPUs
4131 * created using "-cpu ... -smp ..." and by CPUs created
4132 * on the fly by x86_cpu_from_model() for querying. Skip it.
4133 */
4134 if (!strcmp(prop->name, "hotplugged")) {
4135 continue;
4136 }
4137 x86_cpu_expand_prop(cpu, props, prop->name);
4138 }
4139 }
4140
4141 static void object_apply_props(Object *obj, QDict *props, Error **errp)
4142 {
4143 const QDictEntry *prop;
4144 Error *err = NULL;
4145
4146 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
4147 object_property_set_qobject(obj, qdict_entry_value(prop),
4148 qdict_entry_key(prop), &err);
4149 if (err) {
4150 break;
4151 }
4152 }
4153
4154 error_propagate(errp, err);
4155 }
4156
4157 /* Create X86CPU object according to model+props specification */
4158 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
4159 {
4160 X86CPU *xc = NULL;
4161 X86CPUClass *xcc;
4162 Error *err = NULL;
4163
4164 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
4165 if (xcc == NULL) {
4166 error_setg(&err, "CPU model '%s' not found", model);
4167 goto out;
4168 }
4169
4170 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
4171 if (props) {
4172 object_apply_props(OBJECT(xc), props, &err);
4173 if (err) {
4174 goto out;
4175 }
4176 }
4177
4178 x86_cpu_expand_features(xc, &err);
4179 if (err) {
4180 goto out;
4181 }
4182
4183 out:
4184 if (err) {
4185 error_propagate(errp, err);
4186 object_unref(OBJECT(xc));
4187 xc = NULL;
4188 }
4189 return xc;
4190 }
4191
4192 CpuModelExpansionInfo *
4193 qmp_query_cpu_model_expansion(CpuModelExpansionType type,
4194 CpuModelInfo *model,
4195 Error **errp)
4196 {
4197 X86CPU *xc = NULL;
4198 Error *err = NULL;
4199 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
4200 QDict *props = NULL;
4201 const char *base_name;
4202
4203 xc = x86_cpu_from_model(model->name,
4204 model->has_props ?
4205 qobject_to(QDict, model->props) :
4206 NULL, &err);
4207 if (err) {
4208 goto out;
4209 }
4210
4211 props = qdict_new();
4212 ret->model = g_new0(CpuModelInfo, 1);
4213 ret->model->props = QOBJECT(props);
4214 ret->model->has_props = true;
4215
4216 switch (type) {
4217 case CPU_MODEL_EXPANSION_TYPE_STATIC:
4218 /* Static expansion will be based on "base" only */
4219 base_name = "base";
4220 x86_cpu_to_dict(xc, props);
4221 break;
4222 case CPU_MODEL_EXPANSION_TYPE_FULL:
4223 /* As we don't return every single property, full expansion needs
4224 * to keep the original model name+props, and add extra
4225 * properties on top of that.
4226 */
4227 base_name = model->name;
4228 x86_cpu_to_dict_full(xc, props);
4229 break;
4230 default:
4231 error_setg(&err, "Unsupported expansion type");
4232 goto out;
4233 }
4234
4235 x86_cpu_to_dict(xc, props);
4236
4237 ret->model->name = g_strdup(base_name);
4238
4239 out:
4240 object_unref(OBJECT(xc));
4241 if (err) {
4242 error_propagate(errp, err);
4243 qapi_free_CpuModelExpansionInfo(ret);
4244 ret = NULL;
4245 }
4246 return ret;
4247 }
4248 #endif /* !CONFIG_USER_ONLY */
4249
4250 static gchar *x86_gdb_arch_name(CPUState *cs)
4251 {
4252 #ifdef TARGET_X86_64
4253 return g_strdup("i386:x86-64");
4254 #else
4255 return g_strdup("i386");
4256 #endif
4257 }
4258
4259 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
4260 {
4261 X86CPUDefinition *cpudef = data;
4262 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4263
4264 xcc->cpu_def = cpudef;
4265 xcc->migration_safe = true;
4266 }
4267
4268 static void x86_register_cpudef_type(X86CPUDefinition *def)
4269 {
4270 char *typename = x86_cpu_type_name(def->name);
4271 TypeInfo ti = {
4272 .name = typename,
4273 .parent = TYPE_X86_CPU,
4274 .class_init = x86_cpu_cpudef_class_init,
4275 .class_data = def,
4276 };
4277
4278 /* AMD aliases are handled at runtime based on CPUID vendor, so
4279 * they shouldn't be set in the CPU model table.
4280 */
4281 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
4282 /* catch mistakes instead of silently truncating model_id when too long */
4283 assert(def->model_id && strlen(def->model_id) <= 48);
4284
4285
4286 type_register(&ti);
4287 g_free(typename);
4288 }
4289
4290 #if !defined(CONFIG_USER_ONLY)
4291
4292 void cpu_clear_apic_feature(CPUX86State *env)
4293 {
4294 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
4295 }
4296
4297 #endif /* !CONFIG_USER_ONLY */
4298
4299 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
4300 uint32_t *eax, uint32_t *ebx,
4301 uint32_t *ecx, uint32_t *edx)
4302 {
4303 X86CPU *cpu = env_archcpu(env);
4304 CPUState *cs = env_cpu(env);
4305 uint32_t die_offset;
4306 uint32_t limit;
4307 uint32_t signature[3];
4308
4309 /* Calculate & apply limits for different index ranges */
4310 if (index >= 0xC0000000) {
4311 limit = env->cpuid_xlevel2;
4312 } else if (index >= 0x80000000) {
4313 limit = env->cpuid_xlevel;
4314 } else if (index >= 0x40000000) {
4315 limit = 0x40000001;
4316 } else {
4317 limit = env->cpuid_level;
4318 }
4319
4320 if (index > limit) {
4321 /* Intel documentation states that invalid EAX input will
4322 * return the same information as EAX=cpuid_level
4323 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
4324 */
4325 index = env->cpuid_level;
4326 }
4327
4328 switch(index) {
4329 case 0:
4330 *eax = env->cpuid_level;
4331 *ebx = env->cpuid_vendor1;
4332 *edx = env->cpuid_vendor2;
4333 *ecx = env->cpuid_vendor3;
4334 break;
4335 case 1:
4336 *eax = env->cpuid_version;
4337 *ebx = (cpu->apic_id << 24) |
4338 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
4339 *ecx = env->features[FEAT_1_ECX];
4340 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
4341 *ecx |= CPUID_EXT_OSXSAVE;
4342 }
4343 *edx = env->features[FEAT_1_EDX];
4344 if (cs->nr_cores * cs->nr_threads > 1) {
4345 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
4346 *edx |= CPUID_HT;
4347 }
4348 break;
4349 case 2:
4350 /* cache info: needed for Pentium Pro compatibility */
4351 if (cpu->cache_info_passthrough) {
4352 host_cpuid(index, 0, eax, ebx, ecx, edx);
4353 break;
4354 }
4355 *eax = 1; /* Number of CPUID[EAX=2] calls required */
4356 *ebx = 0;
4357 if (!cpu->enable_l3_cache) {
4358 *ecx = 0;
4359 } else {
4360 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
4361 }
4362 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
4363 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
4364 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
4365 break;
4366 case 4:
4367 /* cache info: needed for Core compatibility */
4368 if (cpu->cache_info_passthrough) {
4369 host_cpuid(index, count, eax, ebx, ecx, edx);
4370 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
4371 *eax &= ~0xFC000000;
4372 if ((*eax & 31) && cs->nr_cores > 1) {
4373 *eax |= (cs->nr_cores - 1) << 26;
4374 }
4375 } else {
4376 *eax = 0;
4377 switch (count) {
4378 case 0: /* L1 dcache info */
4379 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
4380 1, cs->nr_cores,
4381 eax, ebx, ecx, edx);
4382 break;
4383 case 1: /* L1 icache info */
4384 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
4385 1, cs->nr_cores,
4386 eax, ebx, ecx, edx);
4387 break;
4388 case 2: /* L2 cache info */
4389 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
4390 cs->nr_threads, cs->nr_cores,
4391 eax, ebx, ecx, edx);
4392 break;
4393 case 3: /* L3 cache info */
4394 die_offset = apicid_die_offset(env->nr_dies,
4395 cs->nr_cores, cs->nr_threads);
4396 if (cpu->enable_l3_cache) {
4397 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
4398 (1 << die_offset), cs->nr_cores,
4399 eax, ebx, ecx, edx);
4400 break;
4401 }
4402 /* fall through */
4403 default: /* end of info */
4404 *eax = *ebx = *ecx = *edx = 0;
4405 break;
4406 }
4407 }
4408 break;
4409 case 5:
4410 /* MONITOR/MWAIT Leaf */
4411 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
4412 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
4413 *ecx = cpu->mwait.ecx; /* flags */
4414 *edx = cpu->mwait.edx; /* mwait substates */
4415 break;
4416 case 6:
4417 /* Thermal and Power Leaf */
4418 *eax = env->features[FEAT_6_EAX];
4419 *ebx = 0;
4420 *ecx = 0;
4421 *edx = 0;
4422 break;
4423 case 7:
4424 /* Structured Extended Feature Flags Enumeration Leaf */
4425 if (count == 0) {
4426 *eax = 0; /* Maximum ECX value for sub-leaves */
4427 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
4428 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
4429 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
4430 *ecx |= CPUID_7_0_ECX_OSPKE;
4431 }
4432 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
4433 } else {
4434 *eax = 0;
4435 *ebx = 0;
4436 *ecx = 0;
4437 *edx = 0;
4438 }
4439 break;
4440 case 9:
4441 /* Direct Cache Access Information Leaf */
4442 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
4443 *ebx = 0;
4444 *ecx = 0;
4445 *edx = 0;
4446 break;
4447 case 0xA:
4448 /* Architectural Performance Monitoring Leaf */
4449 if (kvm_enabled() && cpu->enable_pmu) {
4450 KVMState *s = cs->kvm_state;
4451
4452 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
4453 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
4454 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
4455 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
4456 } else if (hvf_enabled() && cpu->enable_pmu) {
4457 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
4458 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
4459 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
4460 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
4461 } else {
4462 *eax = 0;
4463 *ebx = 0;
4464 *ecx = 0;
4465 *edx = 0;
4466 }
4467 break;
4468 case 0xB:
4469 /* Extended Topology Enumeration Leaf */
4470 if (!cpu->enable_cpuid_0xb) {
4471 *eax = *ebx = *ecx = *edx = 0;
4472 break;
4473 }
4474
4475 *ecx = count & 0xff;
4476 *edx = cpu->apic_id;
4477
4478 switch (count) {
4479 case 0:
4480 *eax = apicid_core_offset(env->nr_dies,
4481 cs->nr_cores, cs->nr_threads);
4482 *ebx = cs->nr_threads;
4483 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
4484 break;
4485 case 1:
4486 *eax = apicid_pkg_offset(env->nr_dies,
4487 cs->nr_cores, cs->nr_threads);
4488 *ebx = cs->nr_cores * cs->nr_threads;
4489 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
4490 break;
4491 default:
4492 *eax = 0;
4493 *ebx = 0;
4494 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
4495 }
4496
4497 assert(!(*eax & ~0x1f));
4498 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
4499 break;
4500 case 0x1F:
4501 /* V2 Extended Topology Enumeration Leaf */
4502 if (env->nr_dies < 2) {
4503 *eax = *ebx = *ecx = *edx = 0;
4504 break;
4505 }
4506
4507 *ecx = count & 0xff;
4508 *edx = cpu->apic_id;
4509 switch (count) {
4510 case 0:
4511 *eax = apicid_core_offset(env->nr_dies, cs->nr_cores,
4512 cs->nr_threads);
4513 *ebx = cs->nr_threads;
4514 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
4515 break;
4516 case 1:
4517 *eax = apicid_die_offset(env->nr_dies, cs->nr_cores,
4518 cs->nr_threads);
4519 *ebx = cs->nr_cores * cs->nr_threads;
4520 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
4521 break;
4522 case 2:
4523 *eax = apicid_pkg_offset(env->nr_dies, cs->nr_cores,
4524 cs->nr_threads);
4525 *ebx = env->nr_dies * cs->nr_cores * cs->nr_threads;
4526 *ecx |= CPUID_TOPOLOGY_LEVEL_DIE;
4527 break;
4528 default:
4529 *eax = 0;
4530 *ebx = 0;
4531 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
4532 }
4533 assert(!(*eax & ~0x1f));
4534 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
4535 break;
4536 case 0xD: {
4537 /* Processor Extended State */
4538 *eax = 0;
4539 *ebx = 0;
4540 *ecx = 0;
4541 *edx = 0;
4542 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4543 break;
4544 }
4545
4546 if (count == 0) {
4547 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
4548 *eax = env->features[FEAT_XSAVE_COMP_LO];
4549 *edx = env->features[FEAT_XSAVE_COMP_HI];
4550 *ebx = xsave_area_size(env->xcr0);
4551 } else if (count == 1) {
4552 *eax = env->features[FEAT_XSAVE];
4553 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
4554 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
4555 const ExtSaveArea *esa = &x86_ext_save_areas[count];
4556 *eax = esa->size;
4557 *ebx = esa->offset;
4558 }
4559 }
4560 break;
4561 }
4562 case 0x14: {
4563 /* Intel Processor Trace Enumeration */
4564 *eax = 0;
4565 *ebx = 0;
4566 *ecx = 0;
4567 *edx = 0;
4568 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
4569 !kvm_enabled()) {
4570 break;
4571 }
4572
4573 if (count == 0) {
4574 *eax = INTEL_PT_MAX_SUBLEAF;
4575 *ebx = INTEL_PT_MINIMAL_EBX;
4576 *ecx = INTEL_PT_MINIMAL_ECX;
4577 } else if (count == 1) {
4578 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
4579 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
4580 }
4581 break;
4582 }
4583 case 0x40000000:
4584 /*
4585 * CPUID code in kvm_arch_init_vcpu() ignores stuff
4586 * set here, but we restrict to TCG nonetheless.
4587 */
4588 if (tcg_enabled() && cpu->expose_tcg) {
4589 memcpy(signature, "TCGTCGTCGTCG", 12);
4590 *eax = 0x40000001;
4591 *ebx = signature[0];
4592 *ecx = signature[1];
4593 *edx = signature[2];
4594 } else {
4595 *eax = 0;
4596 *ebx = 0;
4597 *ecx = 0;
4598 *edx = 0;
4599 }
4600 break;
4601 case 0x40000001:
4602 *eax = 0;
4603 *ebx = 0;
4604 *ecx = 0;
4605 *edx = 0;
4606 break;
4607 case 0x80000000:
4608 *eax = env->cpuid_xlevel;
4609 *ebx = env->cpuid_vendor1;
4610 *edx = env->cpuid_vendor2;
4611 *ecx = env->cpuid_vendor3;
4612 break;
4613 case 0x80000001:
4614 *eax = env->cpuid_version;
4615 *ebx = 0;
4616 *ecx = env->features[FEAT_8000_0001_ECX];
4617 *edx = env->features[FEAT_8000_0001_EDX];
4618
4619 /* The Linux kernel checks for the CMPLegacy bit and
4620 * discards multiple thread information if it is set.
4621 * So don't set it here for Intel to make Linux guests happy.
4622 */
4623 if (cs->nr_cores * cs->nr_threads > 1) {
4624 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
4625 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
4626 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
4627 *ecx |= 1 << 1; /* CmpLegacy bit */
4628 }
4629 }
4630 break;
4631 case 0x80000002:
4632 case 0x80000003:
4633 case 0x80000004:
4634 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
4635 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
4636 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
4637 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
4638 break;
4639 case 0x80000005:
4640 /* cache info (L1 cache) */
4641 if (cpu->cache_info_passthrough) {
4642 host_cpuid(index, 0, eax, ebx, ecx, edx);
4643 break;
4644 }
4645 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
4646 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
4647 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
4648 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
4649 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
4650 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
4651 break;
4652 case 0x80000006:
4653 /* cache info (L2 cache) */
4654 if (cpu->cache_info_passthrough) {
4655 host_cpuid(index, 0, eax, ebx, ecx, edx);
4656 break;
4657 }
4658 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
4659 (L2_DTLB_2M_ENTRIES << 16) | \
4660 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
4661 (L2_ITLB_2M_ENTRIES);
4662 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
4663 (L2_DTLB_4K_ENTRIES << 16) | \
4664 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
4665 (L2_ITLB_4K_ENTRIES);
4666 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
4667 cpu->enable_l3_cache ?
4668 env->cache_info_amd.l3_cache : NULL,
4669 ecx, edx);
4670 break;
4671 case 0x80000007:
4672 *eax = 0;
4673 *ebx = 0;
4674 *ecx = 0;
4675 *edx = env->features[FEAT_8000_0007_EDX];
4676 break;
4677 case 0x80000008:
4678 /* virtual & phys address size in low 2 bytes. */
4679 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4680 /* 64 bit processor */
4681 *eax = cpu->phys_bits; /* configurable physical bits */
4682 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
4683 *eax |= 0x00003900; /* 57 bits virtual */
4684 } else {
4685 *eax |= 0x00003000; /* 48 bits virtual */
4686 }
4687 } else {
4688 *eax = cpu->phys_bits;
4689 }
4690 *ebx = env->features[FEAT_8000_0008_EBX];
4691 *ecx = 0;
4692 *edx = 0;
4693 if (cs->nr_cores * cs->nr_threads > 1) {
4694 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
4695 }
4696 break;
4697 case 0x8000000A:
4698 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4699 *eax = 0x00000001; /* SVM Revision */
4700 *ebx = 0x00000010; /* nr of ASIDs */
4701 *ecx = 0;
4702 *edx = env->features[FEAT_SVM]; /* optional features */
4703 } else {
4704 *eax = 0;
4705 *ebx = 0;
4706 *ecx = 0;
4707 *edx = 0;
4708 }
4709 break;
4710 case 0x8000001D:
4711 *eax = 0;
4712 if (cpu->cache_info_passthrough) {
4713 host_cpuid(index, count, eax, ebx, ecx, edx);
4714 break;
4715 }
4716 switch (count) {
4717 case 0: /* L1 dcache info */
4718 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
4719 eax, ebx, ecx, edx);
4720 break;
4721 case 1: /* L1 icache info */
4722 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
4723 eax, ebx, ecx, edx);
4724 break;
4725 case 2: /* L2 cache info */
4726 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
4727 eax, ebx, ecx, edx);
4728 break;
4729 case 3: /* L3 cache info */
4730 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
4731 eax, ebx, ecx, edx);
4732 break;
4733 default: /* end of info */
4734 *eax = *ebx = *ecx = *edx = 0;
4735 break;
4736 }
4737 break;
4738 case 0x8000001E:
4739 assert(cpu->core_id <= 255);
4740 encode_topo_cpuid8000001e(cs, cpu,
4741 eax, ebx, ecx, edx);
4742 break;
4743 case 0xC0000000:
4744 *eax = env->cpuid_xlevel2;
4745 *ebx = 0;
4746 *ecx = 0;
4747 *edx = 0;
4748 break;
4749 case 0xC0000001:
4750 /* Support for VIA CPU's CPUID instruction */
4751 *eax = env->cpuid_version;
4752 *ebx = 0;
4753 *ecx = 0;
4754 *edx = env->features[FEAT_C000_0001_EDX];
4755 break;
4756 case 0xC0000002:
4757 case 0xC0000003:
4758 case 0xC0000004:
4759 /* Reserved for future use; currently filled with zero */
4760 *eax = 0;
4761 *ebx = 0;
4762 *ecx = 0;
4763 *edx = 0;
4764 break;
4765 case 0x8000001F:
4766 *eax = sev_enabled() ? 0x2 : 0;
4767 *ebx = sev_get_cbit_position();
4768 *ebx |= sev_get_reduced_phys_bits() << 6;
4769 *ecx = 0;
4770 *edx = 0;
4771 break;
4772 default:
4773 /* reserved values: zero */
4774 *eax = 0;
4775 *ebx = 0;
4776 *ecx = 0;
4777 *edx = 0;
4778 break;
4779 }
4780 }
4781
4782 /* CPUClass::reset() */
4783 static void x86_cpu_reset(CPUState *s)
4784 {
4785 X86CPU *cpu = X86_CPU(s);
4786 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
4787 CPUX86State *env = &cpu->env;
4788 target_ulong cr4;
4789 uint64_t xcr0;
4790 int i;
4791
4792 xcc->parent_reset(s);
4793
4794 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
4795
4796 env->old_exception = -1;
4797
4798 /* init to reset state */
4799
4800 env->hflags2 |= HF2_GIF_MASK;
4801
4802 cpu_x86_update_cr0(env, 0x60000010);
4803 env->a20_mask = ~0x0;
4804 env->smbase = 0x30000;
4805 env->msr_smi_count = 0;
4806
4807 env->idt.limit = 0xffff;
4808 env->gdt.limit = 0xffff;
4809 env->ldt.limit = 0xffff;
4810 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
4811 env->tr.limit = 0xffff;
4812 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
4813
4814 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
4815 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
4816 DESC_R_MASK | DESC_A_MASK);
4817 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
4818 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4819 DESC_A_MASK);
4820 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
4821 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4822 DESC_A_MASK);
4823 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
4824 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4825 DESC_A_MASK);
4826 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
4827 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4828 DESC_A_MASK);
4829 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
4830 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4831 DESC_A_MASK);
4832
4833 env->eip = 0xfff0;
4834 env->regs[R_EDX] = env->cpuid_version;
4835
4836 env->eflags = 0x2;
4837
4838 /* FPU init */
4839 for (i = 0; i < 8; i++) {
4840 env->fptags[i] = 1;
4841 }
4842 cpu_set_fpuc(env, 0x37f);
4843
4844 env->mxcsr = 0x1f80;
4845 /* All units are in INIT state. */
4846 env->xstate_bv = 0;
4847
4848 env->pat = 0x0007040600070406ULL;
4849 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
4850 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
4851 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
4852 }
4853
4854 memset(env->dr, 0, sizeof(env->dr));
4855 env->dr[6] = DR6_FIXED_1;
4856 env->dr[7] = DR7_FIXED_1;
4857 cpu_breakpoint_remove_all(s, BP_CPU);
4858 cpu_watchpoint_remove_all(s, BP_CPU);
4859
4860 cr4 = 0;
4861 xcr0 = XSTATE_FP_MASK;
4862
4863 #ifdef CONFIG_USER_ONLY
4864 /* Enable all the features for user-mode. */
4865 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4866 xcr0 |= XSTATE_SSE_MASK;
4867 }
4868 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4869 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4870 if (env->features[esa->feature] & esa->bits) {
4871 xcr0 |= 1ull << i;
4872 }
4873 }
4874
4875 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
4876 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
4877 }
4878 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
4879 cr4 |= CR4_FSGSBASE_MASK;
4880 }
4881 #endif
4882
4883 env->xcr0 = xcr0;
4884 cpu_x86_update_cr4(env, cr4);
4885
4886 /*
4887 * SDM 11.11.5 requires:
4888 * - IA32_MTRR_DEF_TYPE MSR.E = 0
4889 * - IA32_MTRR_PHYSMASKn.V = 0
4890 * All other bits are undefined. For simplification, zero it all.
4891 */
4892 env->mtrr_deftype = 0;
4893 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
4894 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
4895
4896 env->interrupt_injected = -1;
4897 env->exception_nr = -1;
4898 env->exception_pending = 0;
4899 env->exception_injected = 0;
4900 env->exception_has_payload = false;
4901 env->exception_payload = 0;
4902 env->nmi_injected = false;
4903 #if !defined(CONFIG_USER_ONLY)
4904 /* We hard-wire the BSP to the first CPU. */
4905 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
4906
4907 s->halted = !cpu_is_bsp(cpu);
4908
4909 if (kvm_enabled()) {
4910 kvm_arch_reset_vcpu(cpu);
4911 } else if (hvf_enabled()) {
4913 hvf_reset_vcpu(s);
4914 }
4915 #endif
4916 }
4917
4918 #ifndef CONFIG_USER_ONLY
4919 bool cpu_is_bsp(X86CPU *cpu)
4920 {
4921 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4922 }
4923
4924 /* TODO: remove me, when reset over QOM tree is implemented */
4925 static void x86_cpu_machine_reset_cb(void *opaque)
4926 {
4927 X86CPU *cpu = opaque;
4928 cpu_reset(CPU(cpu));
4929 }
4930 #endif
4931
4932 static void mce_init(X86CPU *cpu)
4933 {
4934 CPUX86State *cenv = &cpu->env;
4935 unsigned int bank;
4936
4937 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4938 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4939 (CPUID_MCE | CPUID_MCA)) {
4940 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4941 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4942 cenv->mcg_ctl = ~(uint64_t)0;
4943 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4944 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4945 }
4946 }
4947 }
4948
4949 #ifndef CONFIG_USER_ONLY
4950 APICCommonClass *apic_get_class(void)
4951 {
4952 const char *apic_type = "apic";
4953
4954 /* TODO: in-kernel irqchip for hvf */
4955 if (kvm_apic_in_kernel()) {
4956 apic_type = "kvm-apic";
4957 } else if (xen_enabled()) {
4958 apic_type = "xen-apic";
4959 }
4960
4961 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4962 }
4963
4964 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
4965 {
4966 APICCommonState *apic;
4967 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
4968
4969 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
4970
4971 object_property_add_child(OBJECT(cpu), "lapic",
4972 OBJECT(cpu->apic_state), &error_abort);
4973 object_unref(OBJECT(cpu->apic_state));
4974
4975 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
4976 /* TODO: convert to link<> */
4977 apic = APIC_COMMON(cpu->apic_state);
4978 apic->cpu = cpu;
4979 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
4980 }
4981
4982 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4983 {
4984 APICCommonState *apic;
4985 static bool apic_mmio_map_once;
4986
4987 if (cpu->apic_state == NULL) {
4988 return;
4989 }
4990 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
4991 errp);
4992
4993 /* Map APIC MMIO area */
4994 apic = APIC_COMMON(cpu->apic_state);
4995 if (!apic_mmio_map_once) {
4996 memory_region_add_subregion_overlap(get_system_memory(),
4997 apic->apicbase &
4998 MSR_IA32_APICBASE_BASE,
4999 &apic->io_memory,
5000 0x1000);
5001 apic_mmio_map_once = true;
5002 }
5003 }
5004
5005 static void x86_cpu_machine_done(Notifier *n, void *unused)
5006 {
5007 X86CPU *cpu = container_of(n, X86CPU, machine_done);
5008 MemoryRegion *smram =
5009 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
5010
5011 if (smram) {
5012 cpu->smram = g_new(MemoryRegion, 1);
5013 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
5014 smram, 0, 1ull << 32);
5015 memory_region_set_enabled(cpu->smram, true);
5016 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
5017 }
5018 }
5019 #else
5020 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
5021 {
5022 }
5023 #endif
5024
5025 /* Note: Only safe for use on x86(-64) hosts */
5026 static uint32_t x86_host_phys_bits(void)
5027 {
5028 uint32_t eax;
5029 uint32_t host_phys_bits;
5030
5031 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
5032 if (eax >= 0x80000008) {
5033 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
5034 /* Note: According to AMD doc 25481 rev 2.34 they have a field
5035 * at 23:16 that can specify a maximum number of physical address bits for
5036 * the guest that can override this value; but I've not seen
5037 * anything with that set.
5038 */
5039 host_phys_bits = eax & 0xff;
5040 } else {
5041 /* It's an odd 64 bit machine that doesn't have the leaf for
5042 * physical address bits; fall back to 36, which matches most
5043 * older Intel CPUs.
5044 */
5045 host_phys_bits = 36;
5046 }
5047
5048 return host_phys_bits;
5049 }
5050
5051 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
5052 {
5053 if (*min < value) {
5054 *min = value;
5055 }
5056 }
5057
5058 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
5059 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
5060 {
5061 CPUX86State *env = &cpu->env;
5062 FeatureWordInfo *fi = &feature_word_info[w];
5063 uint32_t eax = fi->cpuid.eax;
5064 uint32_t region = eax & 0xF0000000;
5065
5066 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
5067 if (!env->features[w]) {
5068 return;
5069 }
5070
5071 switch (region) {
5072 case 0x00000000:
5073 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
5074 break;
5075 case 0x80000000:
5076 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
5077 break;
5078 case 0xC0000000:
5079 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
5080 break;
5081 }
5082 }
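
/*
 * For example, a feature word backed by CPUID[EAX=0x80000008].EBX has
 * region 0x80000000, so setting any bit in it raises cpuid_min_xlevel
 * to at least 0x80000008; plain leaves raise cpuid_min_level and the
 * 0xC0000000 range raises cpuid_min_xlevel2.
 */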
5083
5084 /* Calculate XSAVE components based on the configured CPU feature flags */
5085 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
5086 {
5087 CPUX86State *env = &cpu->env;
5088 int i;
5089 uint64_t mask;
5090
5091 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
5092 return;
5093 }
5094
5095 mask = 0;
5096 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
5097 const ExtSaveArea *esa = &x86_ext_save_areas[i];
5098 if (env->features[esa->feature] & esa->bits) {
5099 mask |= (1ULL << i);
5100 }
5101 }
5102
5103 env->features[FEAT_XSAVE_COMP_LO] = mask;
5104 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
5105 }
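
/*
 * Worked example: on a CPU where only the x87 and SSE state components
 * (bits 0 and 1) are enabled, the mask above is 0x3, so
 * FEAT_XSAVE_COMP_LO becomes 0x3 and FEAT_XSAVE_COMP_HI stays 0; a CPU
 * that also has AVX additionally sets bit 2 (the YMM state component).
 */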
5106
5107 /***** Steps involved in loading and filtering CPUID data
5108 *
5109 * When initializing and realizing a CPU object, the steps
5110 * involved in setting up CPUID data are:
5111 *
5112 * 1) Loading CPU model definition (X86CPUDefinition). This is
5113 * implemented by x86_cpu_load_def() and should be completely
5114 * transparent, as it is done automatically by instance_init.
5115 * No code should need to look at X86CPUDefinition structs
5116 * outside instance_init.
5117 *
5118 * 2) CPU expansion. This is done by realize before CPUID
5119 * filtering, and will make sure host/accelerator data is
5120 * loaded for CPU models that depend on host capabilities
5121 * (e.g. "host"). Done by x86_cpu_expand_features().
5122 *
5123 * 3) CPUID filtering. This initializes extra data related to
5124 * CPUID, and checks if the host supports all capabilities
5125 * required by the CPU. Runnability of a CPU model is
5126 * determined at this step. Done by x86_cpu_filter_features().
5127 *
5128 * Some operations don't require all steps to be performed.
5129 * More precisely:
5130 *
5131 * - CPU instance creation (instance_init) will run only CPU
5132 * model loading. CPU expansion can't run at instance_init-time
5133 * because host/accelerator data may not be available yet.
5134 * - CPU realization will perform both CPU model expansion and CPUID
5135 * filtering, and return an error in case one of them fails.
5136 * - query-cpu-definitions needs to run all 3 steps. It needs
5137 * to run CPUID filtering, as the 'unavailable-features'
5138 * field is set based on the filtering results.
5139 * - The query-cpu-model-expansion QMP command only needs to run
5140 * CPU model loading and CPU expansion. It should not filter
5141 * any CPUID data based on host capabilities.
5142 */
5143
5144 /* Expand CPU configuration data, based on configured features
5145 * and host/accelerator capabilities when appropriate.
5146 */
5147 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
5148 {
5149 CPUX86State *env = &cpu->env;
5150 FeatureWord w;
5151 GList *l;
5152 Error *local_err = NULL;
5153
5154 /* TODO: Now that cpu->max_features doesn't overwrite features
5155 * set using QOM properties, plus_features & minus_features can
5156 * also be converted to global properties inside
5157 * x86_cpu_parse_featurestr().
5158 */
5159 if (cpu->max_features) {
5160 for (w = 0; w < FEATURE_WORDS; w++) {
5161 /* Override only features that weren't set explicitly
5162 * by the user.
5163 */
5164 env->features[w] |=
5165 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
5166 ~env->user_features[w] &
5167 ~feature_word_info[w].no_autoenable_flags;
5168 }
5169 }
5170
5171 for (l = plus_features; l; l = l->next) {
5172 const char *prop = l->data;
5173 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
5174 if (local_err) {
5175 goto out;
5176 }
5177 }
5178
5179 for (l = minus_features; l; l = l->next) {
5180 const char *prop = l->data;
5181 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
5182 if (local_err) {
5183 goto out;
5184 }
5185 }
5186
5187 if (!kvm_enabled() || !cpu->expose_kvm) {
5188 env->features[FEAT_KVM] = 0;
5189 }
5190
5191 x86_cpu_enable_xsave_components(cpu);
5192
5193 /* CPUID[EAX=7,ECX=0].EBX always raises the level automatically: */
5194 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
5195 if (cpu->full_cpuid_auto_level) {
5196 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
5197 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
5198 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
5199 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
5200 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
5201 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
5202 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
5203 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
5204 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
5205 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
5206 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
5207
5208 /* Intel Processor Trace requires CPUID[0x14] */
5209 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
5210 kvm_enabled() && cpu->intel_pt_auto_level) {
5211 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
5212 }
5213
5214 /* CPU topology with multi-die support requires CPUID[0x1F] */
5215 if (env->nr_dies > 1) {
5216 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x1F);
5217 }
5218
5219 /* SVM requires CPUID[0x8000000A] */
5220 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
5221 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
5222 }
5223
5224 /* SEV requires CPUID[0x8000001F] */
5225 if (sev_enabled()) {
5226 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
5227 }
5228 }
5229
5230 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
5231 if (env->cpuid_level == UINT32_MAX) {
5232 env->cpuid_level = env->cpuid_min_level;
5233 }
5234 if (env->cpuid_xlevel == UINT32_MAX) {
5235 env->cpuid_xlevel = env->cpuid_min_xlevel;
5236 }
5237 if (env->cpuid_xlevel2 == UINT32_MAX) {
5238 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
5239 }
5240
5241 out:
5242 if (local_err != NULL) {
5243 error_propagate(errp, local_err);
5244 }
5245 }
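
/*
 * Illustrative note (editor's sketch, not part of the original source):
 * the UINT32_MAX checks above work because the "level", "xlevel" and
 * "xlevel2" properties default to UINT32_MAX (see x86_cpu_properties
 * below), meaning "not set explicitly".  A hypothetical
 * "-cpu Skylake-Client,level=0xd" therefore keeps the user's value and
 * skips the cpuid_min_level adjustment.
 */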
5246
5247 /*
5248 * Finishes initialization of CPUID data, filters CPU feature
5249 * words based on host availability of each feature.
5250 *
5251 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
5252 */
5253 static int x86_cpu_filter_features(X86CPU *cpu)
5254 {
5255 CPUX86State *env = &cpu->env;
5256 FeatureWord w;
5257 int rv = 0;
5258
5259 for (w = 0; w < FEATURE_WORDS; w++) {
5260 uint32_t host_feat =
5261 x86_cpu_get_supported_feature_word(w, false);
5262 uint32_t requested_features = env->features[w];
5263 uint32_t available_features = requested_features & host_feat;
5264 if (!cpu->force_features) {
5265 env->features[w] = available_features;
5266 }
5267 cpu->filtered_features[w] = requested_features & ~available_features;
5268 if (cpu->filtered_features[w]) {
5269 rv = 1;
5270 }
5271 }
5272
5273 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
5274 kvm_enabled()) {
5275 KVMState *s = CPU(cpu)->kvm_state;
5276 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
5277 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
5278 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
5279 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
5280 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
5281
5282 if (!eax_0 ||
5283 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
5284 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
5285 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
5286 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
5287 INTEL_PT_ADDR_RANGES_NUM) ||
5288 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
5289 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
5290 (ecx_0 & INTEL_PT_IP_LIP)) {
5291 /*
5292 * Processor Trace capabilities aren't configurable, so if the
5293 * host can't emulate the capabilities we report in
5294 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
5295 */
5296 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
5297 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
5298 rv = 1;
5299 }
5300 }
5301
5302 return rv;
5303 }
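
/*
 * Illustrative note (editor's sketch, not part of the original source):
 * the filtered_features[] bits recorded here feed the warnings printed by
 * x86_cpu_report_filtered_features() when "check"/"enforce" is set, and
 * they also back the "filtered-features" and "unavailable-features" QOM
 * properties registered in x86_cpu_initfn() below, which management
 * software can query.
 */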
5304
5305 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
5306 {
5307 CPUState *cs = CPU(dev);
5308 X86CPU *cpu = X86_CPU(dev);
5309 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5310 CPUX86State *env = &cpu->env;
5311 Error *local_err = NULL;
5312 static bool ht_warned;
5313
5314 if (xcc->host_cpuid_required) {
5315 if (!accel_uses_host_cpuid()) {
5316 char *name = x86_cpu_class_get_model_name(xcc);
5317 error_setg(&local_err, "CPU model '%s' requires KVM", name);
5318 g_free(name);
5319 goto out;
5320 }
5321
5322 if (enable_cpu_pm) {
5323 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
5324 &cpu->mwait.ecx, &cpu->mwait.edx);
5325 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
5326 }
5327 }
5328
5329 /* mwait extended info: needed for Core compatibility */
5330 /* We always wake on interrupt even if host does not have the capability */
5331 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
5332
5333 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
5334 error_setg(errp, "apic-id property was not initialized properly");
5335 return;
5336 }
5337
5338 x86_cpu_expand_features(cpu, &local_err);
5339 if (local_err) {
5340 goto out;
5341 }
5342
5343 if (x86_cpu_filter_features(cpu) &&
5344 (cpu->check_cpuid || cpu->enforce_cpuid)) {
5345 x86_cpu_report_filtered_features(cpu);
5346 if (cpu->enforce_cpuid) {
5347 error_setg(&local_err,
5348 accel_uses_host_cpuid() ?
5349 "Host doesn't support requested features" :
5350 "TCG doesn't support requested features");
5351 goto out;
5352 }
5353 }
5354
5355 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
5356 * CPUID[1].EDX.
5357 */
5358 if (IS_AMD_CPU(env)) {
5359 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
5360 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
5361 & CPUID_EXT2_AMD_ALIASES);
5362 }
5363
5364 /* For 64-bit systems, think about the number of physical bits to present.
5365 * Ideally this should be the same as the host; anything other than matching
5366 * the host can cause incorrect guest behaviour.
5367 * QEMU used to pick the magic value of 40 bits, which corresponds to
5368 * consumer AMD devices but nothing else.
5369 */
5370 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
5371 if (accel_uses_host_cpuid()) {
5372 uint32_t host_phys_bits = x86_host_phys_bits();
5373 static bool warned;
5374
5375 /* Print a warning if the user set it to a value that's not the
5376 * host value.
5377 */
5378 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
5379 !warned) {
5380 warn_report("Host physical bits (%u)"
5381 " does not match phys-bits property (%u)",
5382 host_phys_bits, cpu->phys_bits);
5383 warned = true;
5384 }
5385
5386 if (cpu->host_phys_bits) {
5387 /* The user asked for us to use the host physical bits */
5388 cpu->phys_bits = host_phys_bits;
5389 if (cpu->host_phys_bits_limit &&
5390 cpu->phys_bits > cpu->host_phys_bits_limit) {
5391 cpu->phys_bits = cpu->host_phys_bits_limit;
5392 }
5393 }
5394
5395 if (cpu->phys_bits &&
5396 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
5397 cpu->phys_bits < 32)) {
5398 error_setg(errp, "phys-bits should be between 32 and %u "
5399 " (but is %u)",
5400 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
5401 return;
5402 }
5403 } else {
5404 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
5405 error_setg(errp, "TCG only supports phys-bits=%u",
5406 TCG_PHYS_ADDR_BITS);
5407 return;
5408 }
5409 }
5410 /* 0 means it was not explicitly set by the user (or by machine
5411 * compat_props or by the host code above). In this case, the default
5412 * is the value used by TCG (40).
5413 */
5414 if (cpu->phys_bits == 0) {
5415 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
5416 }
5417 } else {
5418 /* For 32-bit systems, don't use the user-set value, but keep
5419 * phys_bits consistent with what we tell the guest.
5420 */
5421 if (cpu->phys_bits != 0) {
5422 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
5423 return;
5424 }
5425
5426 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
5427 cpu->phys_bits = 36;
5428 } else {
5429 cpu->phys_bits = 32;
5430 }
5431 }
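
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * with a host-CPUID accelerator, "-cpu host,host-phys-bits=on" makes the
 * guest see the host MAXPHYADDR, while something like
 * "-cpu Skylake-Server,phys-bits=46" pins an explicit value; under TCG
 * only the default TCG_PHYS_ADDR_BITS (40) is accepted, as checked above.
 */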
5432
5433 /* Cache information initialization */
5434 if (!cpu->legacy_cache) {
5435 if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
5436 char *name = x86_cpu_class_get_model_name(xcc);
5437 error_setg(errp,
5438 "CPU model '%s' doesn't support legacy-cache=off", name);
5439 g_free(name);
5440 return;
5441 }
5442 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
5443 *xcc->cpu_def->cache_info;
5444 } else {
5445 /* Build legacy cache information */
5446 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
5447 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
5448 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
5449 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
5450
5451 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
5452 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
5453 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
5454 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
5455
5456 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
5457 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
5458 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
5459 env->cache_info_amd.l3_cache = &legacy_l3_cache;
5460 }
5461
5462
5463 cpu_exec_realizefn(cs, &local_err);
5464 if (local_err != NULL) {
5465 error_propagate(errp, local_err);
5466 return;
5467 }
5468
5469 #ifndef CONFIG_USER_ONLY
5470 MachineState *ms = MACHINE(qdev_get_machine());
5471 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
5472
5473 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
5474 x86_cpu_apic_create(cpu, &local_err);
5475 if (local_err != NULL) {
5476 goto out;
5477 }
5478 }
5479 #endif
5480
5481 mce_init(cpu);
5482
5483 #ifndef CONFIG_USER_ONLY
5484 if (tcg_enabled()) {
5485 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
5486 cpu->cpu_as_root = g_new(MemoryRegion, 1);
5487
5488 /* Outer container... */
5489 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
5490 memory_region_set_enabled(cpu->cpu_as_root, true);
5491
5492 /* ... with two regions inside: normal system memory with low
5493 * priority, and...
5494 */
5495 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
5496 get_system_memory(), 0, ~0ull);
5497 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
5498 memory_region_set_enabled(cpu->cpu_as_mem, true);
5499
5500 cs->num_ases = 2;
5501 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
5502 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
5503
5504 /* ... SMRAM with higher priority, linked from /machine/smram. */
5505 cpu->machine_done.notify = x86_cpu_machine_done;
5506 qemu_add_machine_init_done_notifier(&cpu->machine_done);
5507 }
5508 #endif
5509
5510 qemu_init_vcpu(cs);
5511
5512 /*
5513 * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
5514 * adjusts CPUID_0000_0001_EBX and CPUID_8000_0008_ECX based on the
5515 * configured (sockets, cores, threads), AMD CPU models without topoext
5516 * cannot represent such a topology, so it is still better to give users
5517 * a warning.
5518 *
5519 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
5520 * cs->nr_threads hasn't been populated yet and the checking is incorrect.
5520 */
5521 if (IS_AMD_CPU(env) &&
5522 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
5523 cs->nr_threads > 1 && !ht_warned) {
5524 warn_report("This family of AMD CPU doesn't support "
5525 "hyperthreading(%d)",
5526 cs->nr_threads);
5527 error_printf("Please configure -smp options properly"
5528 " or try enabling topoext feature.\n");
5529 ht_warned = true;
5530 }
5531
5532 x86_cpu_apic_realize(cpu, &local_err);
5533 if (local_err != NULL) {
5534 goto out;
5535 }
5536 cpu_reset(cs);
5537
5538 xcc->parent_realize(dev, &local_err);
5539
5540 out:
5541 if (local_err != NULL) {
5542 error_propagate(errp, local_err);
5543 return;
5544 }
5545 }
5546
5547 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
5548 {
5549 X86CPU *cpu = X86_CPU(dev);
5550 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5551 Error *local_err = NULL;
5552
5553 #ifndef CONFIG_USER_ONLY
5554 cpu_remove_sync(CPU(dev));
5555 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
5556 #endif
5557
5558 if (cpu->apic_state) {
5559 object_unparent(OBJECT(cpu->apic_state));
5560 cpu->apic_state = NULL;
5561 }
5562
5563 xcc->parent_unrealize(dev, &local_err);
5564 if (local_err != NULL) {
5565 error_propagate(errp, local_err);
5566 return;
5567 }
5568 }
5569
5570 typedef struct BitProperty {
5571 FeatureWord w;
5572 uint32_t mask;
5573 } BitProperty;
5574
5575 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5576 void *opaque, Error **errp)
5577 {
5578 X86CPU *cpu = X86_CPU(obj);
5579 BitProperty *fp = opaque;
5580 uint32_t f = cpu->env.features[fp->w];
5581 bool value = (f & fp->mask) == fp->mask;
5582 visit_type_bool(v, name, &value, errp);
5583 }
5584
5585 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5586 void *opaque, Error **errp)
5587 {
5588 DeviceState *dev = DEVICE(obj);
5589 X86CPU *cpu = X86_CPU(obj);
5590 BitProperty *fp = opaque;
5591 Error *local_err = NULL;
5592 bool value;
5593
5594 if (dev->realized) {
5595 qdev_prop_set_after_realize(dev, name, errp);
5596 return;
5597 }
5598
5599 visit_type_bool(v, name, &value, &local_err);
5600 if (local_err) {
5601 error_propagate(errp, local_err);
5602 return;
5603 }
5604
5605 if (value) {
5606 cpu->env.features[fp->w] |= fp->mask;
5607 } else {
5608 cpu->env.features[fp->w] &= ~fp->mask;
5609 }
5610 cpu->env.user_features[fp->w] |= fp->mask;
5611 }
5612
5613 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5614 void *opaque)
5615 {
5616 BitProperty *prop = opaque;
5617 g_free(prop);
5618 }
5619
5620 /* Register a boolean property to get/set a single bit in a uint32_t field.
5621 *
5622 * The same property name can be registered multiple times to make it affect
5623 * multiple bits in the same FeatureWord. In that case, the getter will return
5624 * true only if all bits are set.
5625 */
5626 static void x86_cpu_register_bit_prop(X86CPU *cpu,
5627 const char *prop_name,
5628 FeatureWord w,
5629 int bitnr)
5630 {
5631 BitProperty *fp;
5632 ObjectProperty *op;
5633 uint32_t mask = (1UL << bitnr);
5634
5635 op = object_property_find(OBJECT(cpu), prop_name, NULL);
5636 if (op) {
5637 fp = op->opaque;
5638 assert(fp->w == w);
5639 fp->mask |= mask;
5640 } else {
5641 fp = g_new0(BitProperty, 1);
5642 fp->w = w;
5643 fp->mask = mask;
5644 object_property_add(OBJECT(cpu), prop_name, "bool",
5645 x86_cpu_get_bit_prop,
5646 x86_cpu_set_bit_prop,
5647 x86_cpu_release_bit_prop, fp, &error_abort);
5648 }
5649 }
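
/*
 * Hypothetical illustration (editor's sketch, not part of the original
 * source): registering the same name twice makes one boolean property
 * cover several bits of a feature word, e.g.:
 *
 *     x86_cpu_register_bit_prop(cpu, "some-flag", FEAT_1_EDX, 3);
 *     x86_cpu_register_bit_prop(cpu, "some-flag", FEAT_1_EDX, 5);
 *
 * "some-flag" then reads back as true only when both bits are set, and
 * setting it sets or clears both bits together (see the getter/setter
 * above).
 */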
5650
5651 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5652 FeatureWord w,
5653 int bitnr)
5654 {
5655 FeatureWordInfo *fi = &feature_word_info[w];
5656 const char *name = fi->feat_names[bitnr];
5657
5658 if (!name) {
5659 return;
5660 }
5661
5662 /* Property names should use "-" instead of "_".
5663 * Old names containing underscores are registered as aliases
5664 * using object_property_add_alias()
5665 */
5666 assert(!strchr(name, '_'));
5667 /* aliases don't use "|" delimiters anymore; they are registered
5668 * manually using object_property_add_alias() */
5669 assert(!strchr(name, '|'));
5670 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5671 }
5672
5673 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5674 {
5675 X86CPU *cpu = X86_CPU(cs);
5676 CPUX86State *env = &cpu->env;
5677 GuestPanicInformation *panic_info = NULL;
5678
5679 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5680 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5681
5682 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
5683
5684 assert(HV_CRASH_PARAMS >= 5);
5685 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5686 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5687 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5688 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5689 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5690 }
5691
5692 return panic_info;
5693 }
5694 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5695 const char *name, void *opaque,
5696 Error **errp)
5697 {
5698 CPUState *cs = CPU(obj);
5699 GuestPanicInformation *panic_info;
5700
5701 if (!cs->crash_occurred) {
5702 error_setg(errp, "No crash occurred");
5703 return;
5704 }
5705
5706 panic_info = x86_cpu_get_crash_info(cs);
5707 if (panic_info == NULL) {
5708 error_setg(errp, "No crash information");
5709 return;
5710 }
5711
5712 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5713 errp);
5714 qapi_free_GuestPanicInformation(panic_info);
5715 }
5716
5717 static void x86_cpu_initfn(Object *obj)
5718 {
5719 X86CPU *cpu = X86_CPU(obj);
5720 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
5721 CPUX86State *env = &cpu->env;
5722 FeatureWord w;
5723
5724 env->nr_dies = 1;
5725 cpu_set_cpustate_pointers(cpu);
5726
5727 object_property_add(obj, "family", "int",
5728 x86_cpuid_version_get_family,
5729 x86_cpuid_version_set_family, NULL, NULL, NULL);
5730 object_property_add(obj, "model", "int",
5731 x86_cpuid_version_get_model,
5732 x86_cpuid_version_set_model, NULL, NULL, NULL);
5733 object_property_add(obj, "stepping", "int",
5734 x86_cpuid_version_get_stepping,
5735 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
5736 object_property_add_str(obj, "vendor",
5737 x86_cpuid_get_vendor,
5738 x86_cpuid_set_vendor, NULL);
5739 object_property_add_str(obj, "model-id",
5740 x86_cpuid_get_model_id,
5741 x86_cpuid_set_model_id, NULL);
5742 object_property_add(obj, "tsc-frequency", "int",
5743 x86_cpuid_get_tsc_freq,
5744 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
5745 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
5746 x86_cpu_get_feature_words,
5747 NULL, NULL, (void *)env->features, NULL);
5748 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
5749 x86_cpu_get_feature_words,
5750 NULL, NULL, (void *)cpu->filtered_features, NULL);
5751 /*
5752 * The "unavailable-features" property has the same semantics as
5753 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
5754 * QMP command: they list the features that would have prevented the
5755 * CPU from running if the "enforce" flag was set.
5756 */
5757 object_property_add(obj, "unavailable-features", "strList",
5758 x86_cpu_get_unavailable_features,
5759 NULL, NULL, NULL, &error_abort);
5760
5761 object_property_add(obj, "crash-information", "GuestPanicInformation",
5762 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
5763
5764 for (w = 0; w < FEATURE_WORDS; w++) {
5765 int bitnr;
5766
5767 for (bitnr = 0; bitnr < 32; bitnr++) {
5768 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
5769 }
5770 }
5771
5772 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
5773 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
5774 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
5775 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
5776 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
5777 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
5778 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
5779
5780 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
5781 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
5782 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
5783 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
5784 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
5785 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
5786 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
5787 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
5788 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
5789 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
5790 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
5791 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
5792 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
5793 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
5794 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
5795 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
5796 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
5797 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
5798 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
5799 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
5800 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
5801
5802 if (xcc->cpu_def) {
5803 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
5804 }
5805 }
5806
5807 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5808 {
5809 X86CPU *cpu = X86_CPU(cs);
5810
5811 return cpu->apic_id;
5812 }
5813
5814 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5815 {
5816 X86CPU *cpu = X86_CPU(cs);
5817
5818 return cpu->env.cr[0] & CR0_PG_MASK;
5819 }
5820
5821 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5822 {
5823 X86CPU *cpu = X86_CPU(cs);
5824
5825 cpu->env.eip = value;
5826 }
5827
5828 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5829 {
5830 X86CPU *cpu = X86_CPU(cs);
5831
5832 cpu->env.eip = tb->pc - tb->cs_base;
5833 }
5834
5835 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
5836 {
5837 X86CPU *cpu = X86_CPU(cs);
5838 CPUX86State *env = &cpu->env;
5839
5840 #if !defined(CONFIG_USER_ONLY)
5841 if (interrupt_request & CPU_INTERRUPT_POLL) {
5842 return CPU_INTERRUPT_POLL;
5843 }
5844 #endif
5845 if (interrupt_request & CPU_INTERRUPT_SIPI) {
5846 return CPU_INTERRUPT_SIPI;
5847 }
5848
5849 if (env->hflags2 & HF2_GIF_MASK) {
5850 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
5851 !(env->hflags & HF_SMM_MASK)) {
5852 return CPU_INTERRUPT_SMI;
5853 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
5854 !(env->hflags2 & HF2_NMI_MASK)) {
5855 return CPU_INTERRUPT_NMI;
5856 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
5857 return CPU_INTERRUPT_MCE;
5858 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
5859 (((env->hflags2 & HF2_VINTR_MASK) &&
5860 (env->hflags2 & HF2_HIF_MASK)) ||
5861 (!(env->hflags2 & HF2_VINTR_MASK) &&
5862 (env->eflags & IF_MASK &&
5863 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
5864 return CPU_INTERRUPT_HARD;
5865 #if !defined(CONFIG_USER_ONLY)
5866 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
5867 (env->eflags & IF_MASK) &&
5868 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
5869 return CPU_INTERRUPT_VIRQ;
5870 #endif
5871 }
5872 }
5873
5874 return 0;
5875 }
5876
5877 static bool x86_cpu_has_work(CPUState *cs)
5878 {
5879 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
5880 }
5881
5882 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5883 {
5884 X86CPU *cpu = X86_CPU(cs);
5885 CPUX86State *env = &cpu->env;
5886
5887 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5888 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5889 : bfd_mach_i386_i8086);
5890 info->print_insn = print_insn_i386;
5891
5892 info->cap_arch = CS_ARCH_X86;
5893 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5894 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5895 : CS_MODE_16);
5896 info->cap_insn_unit = 1;
5897 info->cap_insn_split = 8;
5898 }
5899
5900 void x86_update_hflags(CPUX86State *env)
5901 {
5902 uint32_t hflags;
5903 #define HFLAG_COPY_MASK \
5904 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
5905 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
5906 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
5907 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
5908
5909 hflags = env->hflags & HFLAG_COPY_MASK;
5910 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
5911 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
5912 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
5913 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
5914 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
5915
5916 if (env->cr[4] & CR4_OSFXSR_MASK) {
5917 hflags |= HF_OSFXSR_MASK;
5918 }
5919
5920 if (env->efer & MSR_EFER_LMA) {
5921 hflags |= HF_LMA_MASK;
5922 }
5923
5924 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
5925 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
5926 } else {
5927 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
5928 (DESC_B_SHIFT - HF_CS32_SHIFT);
5929 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
5930 (DESC_B_SHIFT - HF_SS32_SHIFT);
5931 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
5932 !(hflags & HF_CS32_MASK)) {
5933 hflags |= HF_ADDSEG_MASK;
5934 } else {
5935 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
5936 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
5937 }
5938 }
5939 env->hflags = hflags;
5940 }
5941
5942 static Property x86_cpu_properties[] = {
5943 #ifdef CONFIG_USER_ONLY
5944 /* apic_id = 0 by default for *-user, see commit 9886e834 */
5945 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
5946 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
5947 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
5948 DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
5949 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
5950 #else
5951 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
5952 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
5953 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
5954 DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
5955 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
5956 #endif
5957 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
5958 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
5959
5960 DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
5961 HYPERV_SPINLOCK_NEVER_RETRY),
5962 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
5963 HYPERV_FEAT_RELAXED, 0),
5964 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
5965 HYPERV_FEAT_VAPIC, 0),
5966 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
5967 HYPERV_FEAT_TIME, 0),
5968 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
5969 HYPERV_FEAT_CRASH, 0),
5970 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
5971 HYPERV_FEAT_RESET, 0),
5972 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
5973 HYPERV_FEAT_VPINDEX, 0),
5974 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
5975 HYPERV_FEAT_RUNTIME, 0),
5976 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
5977 HYPERV_FEAT_SYNIC, 0),
5978 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
5979 HYPERV_FEAT_STIMER, 0),
5980 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
5981 HYPERV_FEAT_FREQUENCIES, 0),
5982 DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
5983 HYPERV_FEAT_REENLIGHTENMENT, 0),
5984 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
5985 HYPERV_FEAT_TLBFLUSH, 0),
5986 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
5987 HYPERV_FEAT_EVMCS, 0),
5988 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
5989 HYPERV_FEAT_IPI, 0),
5990 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
5991 HYPERV_FEAT_STIMER_DIRECT, 0),
5992 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
5993
5994 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
5995 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
5996 DEFINE_PROP_BOOL("x-force-features", X86CPU, force_features, false),
5997 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
5998 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
5999 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
6000 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
6001 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
6002 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
6003 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
6004 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
6005 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
6006 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
6007 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
6008 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
6009 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
6010 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
6011 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
6012 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
6013 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
6014 false),
6015 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
6016 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
6017 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
6018 true),
6019 /*
6020 * legacy_cache defaults to true unless the CPU model provides its
6021 * own cache information (see x86_cpu_load_def()).
6022 */
6023 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
6024
6025 /*
6026 * From "Requirements for Implementing the Microsoft
6027 * Hypervisor Interface":
6028 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
6029 *
6030 * "Starting with Windows Server 2012 and Windows 8, if
6031 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
6032 * the hypervisor imposes no specific limit to the number of VPs.
6033 * In this case, Windows Server 2012 guest VMs may use more than
6034 * 64 VPs, up to the maximum supported number of processors applicable
6035 * to the specific Windows version being used."
6036 */
6037 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
6038 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
6039 false),
6040 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
6041 true),
6042 DEFINE_PROP_END_OF_LIST()
6043 };
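
/*
 * Illustrative usage (editor's sketch, not part of the original source):
 * most of the qdev properties above are set through the -cpu option,
 * for example:
 *
 *     -cpu Skylake-Client,enforce=on,l3-cache=off,host-phys-bits=on
 *
 * Unsupported feature combinations are then reported by the
 * "check"/"enforce" handling in x86_cpu_realizefn() above.
 */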
6044
6045 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
6046 {
6047 X86CPUClass *xcc = X86_CPU_CLASS(oc);
6048 CPUClass *cc = CPU_CLASS(oc);
6049 DeviceClass *dc = DEVICE_CLASS(oc);
6050
6051 device_class_set_parent_realize(dc, x86_cpu_realizefn,
6052 &xcc->parent_realize);
6053 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
6054 &xcc->parent_unrealize);
6055 dc->props = x86_cpu_properties;
6056
6057 xcc->parent_reset = cc->reset;
6058 cc->reset = x86_cpu_reset;
6059 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
6060
6061 cc->class_by_name = x86_cpu_class_by_name;
6062 cc->parse_features = x86_cpu_parse_featurestr;
6063 cc->has_work = x86_cpu_has_work;
6064 #ifdef CONFIG_TCG
6065 cc->do_interrupt = x86_cpu_do_interrupt;
6066 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
6067 #endif
6068 cc->dump_state = x86_cpu_dump_state;
6069 cc->get_crash_info = x86_cpu_get_crash_info;
6070 cc->set_pc = x86_cpu_set_pc;
6071 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
6072 cc->gdb_read_register = x86_cpu_gdb_read_register;
6073 cc->gdb_write_register = x86_cpu_gdb_write_register;
6074 cc->get_arch_id = x86_cpu_get_arch_id;
6075 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
6076 #ifndef CONFIG_USER_ONLY
6077 cc->asidx_from_attrs = x86_asidx_from_attrs;
6078 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
6079 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
6080 cc->write_elf64_note = x86_cpu_write_elf64_note;
6081 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
6082 cc->write_elf32_note = x86_cpu_write_elf32_note;
6083 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
6084 cc->vmsd = &vmstate_x86_cpu;
6085 #endif
6086 cc->gdb_arch_name = x86_gdb_arch_name;
6087 #ifdef TARGET_X86_64
6088 cc->gdb_core_xml_file = "i386-64bit.xml";
6089 cc->gdb_num_core_regs = 66;
6090 #else
6091 cc->gdb_core_xml_file = "i386-32bit.xml";
6092 cc->gdb_num_core_regs = 50;
6093 #endif
6094 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
6095 cc->debug_excp_handler = breakpoint_handler;
6096 #endif
6097 cc->cpu_exec_enter = x86_cpu_exec_enter;
6098 cc->cpu_exec_exit = x86_cpu_exec_exit;
6099 #ifdef CONFIG_TCG
6100 cc->tcg_initialize = tcg_x86_init;
6101 cc->tlb_fill = x86_cpu_tlb_fill;
6102 #endif
6103 cc->disas_set_info = x86_disas_set_info;
6104
6105 dc->user_creatable = true;
6106 }
6107
6108 static const TypeInfo x86_cpu_type_info = {
6109 .name = TYPE_X86_CPU,
6110 .parent = TYPE_CPU,
6111 .instance_size = sizeof(X86CPU),
6112 .instance_init = x86_cpu_initfn,
6113 .abstract = true,
6114 .class_size = sizeof(X86CPUClass),
6115 .class_init = x86_cpu_common_class_init,
6116 };
6117
6118
6119 /* "base" CPU model, used by query-cpu-model-expansion */
6120 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
6121 {
6122 X86CPUClass *xcc = X86_CPU_CLASS(oc);
6123
6124 xcc->static_model = true;
6125 xcc->migration_safe = true;
6126 xcc->model_description = "base CPU model type with no features enabled";
6127 xcc->ordering = 8;
6128 }
6129
6130 static const TypeInfo x86_base_cpu_type_info = {
6131 .name = X86_CPU_TYPE_NAME("base"),
6132 .parent = TYPE_X86_CPU,
6133 .class_init = x86_cpu_base_class_init,
6134 };
6135
6136 static void x86_cpu_register_types(void)
6137 {
6138 int i;
6139
6140 type_register_static(&x86_cpu_type_info);
6141 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
6142 x86_register_cpudef_type(&builtin_x86_defs[i]);
6143 }
6144 type_register_static(&max_x86_cpu_type_info);
6145 type_register_static(&x86_base_cpu_type_info);
6146 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
6147 type_register_static(&host_x86_cpu_type_info);
6148 #endif
6149 }
6150
6151 type_init(x86_cpu_register_types)