1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24 #include "qemu/qemu-print.h"
25
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/hvf.h"
30 #include "sysemu/cpus.h"
31 #include "kvm_i386.h"
32 #include "sev_i386.h"
33
34 #include "qemu/error-report.h"
35 #include "qemu/module.h"
36 #include "qemu/option.h"
37 #include "qemu/config-file.h"
38 #include "qapi/error.h"
39 #include "qapi/qapi-visit-machine.h"
40 #include "qapi/qapi-visit-run-state.h"
41 #include "qapi/qmp/qdict.h"
42 #include "qapi/qmp/qerror.h"
43 #include "qapi/visitor.h"
44 #include "qom/qom-qobject.h"
45 #include "sysemu/arch_init.h"
46 #include "qapi/qapi-commands-machine-target.h"
47
48 #include "standard-headers/asm-x86/kvm_para.h"
49
50 #include "sysemu/sysemu.h"
51 #include "sysemu/tcg.h"
52 #include "hw/qdev-properties.h"
53 #include "hw/i386/topology.h"
54 #ifndef CONFIG_USER_ONLY
55 #include "exec/address-spaces.h"
56 #include "hw/hw.h"
57 #include "hw/xen/xen.h"
58 #include "hw/i386/apic_internal.h"
59 #include "hw/boards.h"
60 #endif
61
62 #include "disas/capstone.h"
63
64 /* Helpers for building CPUID[2] descriptors: */
65
66 struct CPUID2CacheDescriptorInfo {
67 enum CacheType type;
68 int level;
69 int size;
70 int line_size;
71 int associativity;
72 };
73
74 /*
75 * Known CPUID 2 cache descriptors.
76 * From Intel SDM Volume 2A, CPUID instruction
77 */
78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
80 .associativity = 4, .line_size = 32, },
81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
82 .associativity = 4, .line_size = 32, },
83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
84 .associativity = 4, .line_size = 64, },
85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
86 .associativity = 2, .line_size = 32, },
87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
88 .associativity = 4, .line_size = 32, },
89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
90 .associativity = 4, .line_size = 64, },
91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
92 .associativity = 6, .line_size = 64, },
93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
94 .associativity = 2, .line_size = 64, },
95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
96 .associativity = 8, .line_size = 64, },
 97 /* lines per sector is not supported by cpuid2_cache_descriptor(),
98 * so descriptors 0x22, 0x23 are not included
99 */
100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
101 .associativity = 16, .line_size = 64, },
 102 /* lines per sector is not supported by cpuid2_cache_descriptor(),
103 * so descriptors 0x25, 0x20 are not included
104 */
105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
106 .associativity = 8, .line_size = 64, },
107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
108 .associativity = 8, .line_size = 64, },
109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
112 .associativity = 4, .line_size = 32, },
113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
114 .associativity = 4, .line_size = 32, },
115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
116 .associativity = 4, .line_size = 32, },
117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
118 .associativity = 4, .line_size = 32, },
119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
120 .associativity = 4, .line_size = 64, },
121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
122 .associativity = 8, .line_size = 64, },
123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
124 .associativity = 12, .line_size = 64, },
125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
131 .associativity = 12, .line_size = 64, },
132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
133 .associativity = 16, .line_size = 64, },
134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
135 .associativity = 24, .line_size = 64, },
136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
137 .associativity = 8, .line_size = 64, },
138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
141 .associativity = 4, .line_size = 64, },
142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
143 .associativity = 4, .line_size = 64, },
144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
145 .associativity = 4, .line_size = 64, },
 146 /* lines per sector is not supported by cpuid2_cache_descriptor(),
147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
148 */
149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
150 .associativity = 8, .line_size = 64, },
151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
152 .associativity = 2, .line_size = 64, },
153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 64, },
155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
156 .associativity = 8, .line_size = 32, },
157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
158 .associativity = 8, .line_size = 32, },
159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
160 .associativity = 8, .line_size = 32, },
161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
162 .associativity = 8, .line_size = 32, },
163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 8, .line_size = 64, },
167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 4, .line_size = 64, },
171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 4, .line_size = 64, },
173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
176 .associativity = 8, .line_size = 64, },
177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
178 .associativity = 8, .line_size = 64, },
179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
182 .associativity = 12, .line_size = 64, },
183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
184 .associativity = 12, .line_size = 64, },
185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
188 .associativity = 16, .line_size = 64, },
189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
190 .associativity = 16, .line_size = 64, },
191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
192 .associativity = 24, .line_size = 64, },
193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
194 .associativity = 24, .line_size = 64, },
195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
196 .associativity = 24, .line_size = 64, },
197 };
198
199 /*
200 * "CPUID leaf 2 does not report cache descriptor information,
201 * use CPUID leaf 4 to query cache parameters"
202 */
203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
204
205 /*
206 * Return a CPUID 2 cache descriptor for a given cache.
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
208 */
209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
210 {
211 int i;
212
213 assert(cache->size > 0);
214 assert(cache->level > 0);
215 assert(cache->line_size > 0);
216 assert(cache->associativity > 0);
217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
219 if (d->level == cache->level && d->type == cache->type &&
220 d->size == cache->size && d->line_size == cache->line_size &&
221 d->associativity == cache->associativity) {
222 return i;
223 }
224 }
225
226 return CACHE_DESCRIPTOR_UNAVAILABLE;
227 }
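
/*
 * For example, the legacy_l1d_cache defined below (L1 data, 32 KiB, 8-way,
 * 64-byte lines) matches descriptor 0x2C in the table above, legacy_l1i_cache
 * matches 0x30, and legacy_l2_cache_cpuid2 matches 0x7D; a geometry with no
 * table entry (such as the 4 MiB, 16-way legacy_l2_cache) returns
 * CACHE_DESCRIPTOR_UNAVAILABLE (0xFF).
 */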
228
229 /* CPUID Leaf 4 constants: */
230
231 /* EAX: */
232 #define CACHE_TYPE_D 1
233 #define CACHE_TYPE_I 2
234 #define CACHE_TYPE_UNIFIED 3
235
236 #define CACHE_LEVEL(l) (l << 5)
237
238 #define CACHE_SELF_INIT_LEVEL (1 << 8)
239
240 /* EDX: */
241 #define CACHE_NO_INVD_SHARING (1 << 0)
242 #define CACHE_INCLUSIVE (1 << 1)
243 #define CACHE_COMPLEX_IDX (1 << 2)
244
245 /* Encode CacheType for CPUID[4].EAX */
246 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
247 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
248 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
249 0 /* Invalid value */)
250
251
252 /* Encode cache info for CPUID[4] */
253 static void encode_cache_cpuid4(CPUCacheInfo *cache,
254 int num_apic_ids, int num_cores,
255 uint32_t *eax, uint32_t *ebx,
256 uint32_t *ecx, uint32_t *edx)
257 {
258 assert(cache->size == cache->line_size * cache->associativity *
259 cache->partitions * cache->sets);
260
261 assert(num_apic_ids > 0);
262 *eax = CACHE_TYPE(cache->type) |
263 CACHE_LEVEL(cache->level) |
264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
265 ((num_cores - 1) << 26) |
266 ((num_apic_ids - 1) << 14);
267
268 assert(cache->line_size > 0);
269 assert(cache->partitions > 0);
270 assert(cache->associativity > 0);
271 /* We don't implement fully-associative caches */
272 assert(cache->associativity < cache->sets);
273 *ebx = (cache->line_size - 1) |
274 ((cache->partitions - 1) << 12) |
275 ((cache->associativity - 1) << 22);
276
277 assert(cache->sets > 0);
278 *ecx = cache->sets - 1;
279
280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
281 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
283 }
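
/*
 * Worked example (assuming num_apic_ids = num_cores = 1): for the
 * legacy_l1d_cache defined below (L1 data, 64-byte lines, 1 partition,
 * 8-way, 64 sets, self-initializing, no_invd_sharing), this yields
 *   EAX = 0x00000121, EBX = 0x01C0003F, ECX = 0x0000003F, EDX = 0x00000001.
 */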
284
285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
287 {
288 assert(cache->size % 1024 == 0);
289 assert(cache->lines_per_tag > 0);
290 assert(cache->associativity > 0);
291 assert(cache->line_size > 0);
292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
293 (cache->lines_per_tag << 8) | (cache->line_size);
294 }
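
/*
 * Worked example: for legacy_l1d_cache_amd defined below (64 KiB, 2-way,
 * 1 line per tag, 64-byte lines) this returns
 *   (64 << 24) | (2 << 16) | (1 << 8) | 64 == 0x40020140.
 */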
295
296 #define ASSOC_FULL 0xFF
297
298 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
299 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
300 a == 2 ? 0x2 : \
301 a == 4 ? 0x4 : \
302 a == 8 ? 0x6 : \
303 a == 16 ? 0x8 : \
304 a == 32 ? 0xA : \
305 a == 48 ? 0xB : \
306 a == 64 ? 0xC : \
307 a == 96 ? 0xD : \
308 a == 128 ? 0xE : \
309 a == ASSOC_FULL ? 0xF : \
310 0 /* invalid value */)
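
/*
 * E.g. AMD_ENC_ASSOC(8) == 0x6, AMD_ENC_ASSOC(16) == 0x8 and
 * AMD_ENC_ASSOC(ASSOC_FULL) == 0xF; associativities not listed above
 * (e.g. 6) encode as 0, an invalid value.
 */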
311
312 /*
313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
314 * @l3 can be NULL.
315 */
316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
317 CPUCacheInfo *l3,
318 uint32_t *ecx, uint32_t *edx)
319 {
320 assert(l2->size % 1024 == 0);
321 assert(l2->associativity > 0);
322 assert(l2->lines_per_tag > 0);
323 assert(l2->line_size > 0);
324 *ecx = ((l2->size / 1024) << 16) |
325 (AMD_ENC_ASSOC(l2->associativity) << 12) |
326 (l2->lines_per_tag << 8) | (l2->line_size);
327
328 if (l3) {
329 assert(l3->size % (512 * 1024) == 0);
330 assert(l3->associativity > 0);
331 assert(l3->lines_per_tag > 0);
332 assert(l3->line_size > 0);
333 *edx = ((l3->size / (512 * 1024)) << 18) |
334 (AMD_ENC_ASSOC(l3->associativity) << 12) |
335 (l3->lines_per_tag << 8) | (l3->line_size);
336 } else {
337 *edx = 0;
338 }
339 }
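
/*
 * Worked example: with legacy_l2_cache_amd (512 KiB, 16-way, 1 line/tag,
 * 64-byte lines) and legacy_l3_cache (16 MiB, 16-way, 1 line/tag, 64-byte
 * lines) defined below, this produces ECX = 0x02008140 and EDX = 0x00808140
 * (16 MiB is 32 half-megabyte units in EDX[31:18]).
 */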
340
341 /*
342 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
343 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
 344 * Define the constants used to build the CPU topology. Right now, the
 345 * TOPOEXT feature is enabled only on EPYC, so these constants are based
 346 * on the configurations supported by EPYC. We may need to handle the
 347 * cases if these values change in the future.
348 */
349 /* Maximum core complexes in a node */
350 #define MAX_CCX 2
351 /* Maximum cores in a core complex */
352 #define MAX_CORES_IN_CCX 4
353 /* Maximum cores in a node */
354 #define MAX_CORES_IN_NODE 8
355 /* Maximum nodes in a socket */
356 #define MAX_NODES_PER_SOCKET 4
357
358 /*
359 * Figure out the number of nodes required to build this config.
360 * Max cores in a node is 8
361 */
362 static int nodes_in_socket(int nr_cores)
363 {
364 int nodes;
365
366 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
367
 368 /* Hardware does not support a config with 3 nodes; return 4 in that case */
369 return (nodes == 3) ? 4 : nodes;
370 }
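
/*
 * E.g. 8 cores -> 1 node, 12 cores -> 2 nodes, 20 cores -> 3 rounded up to
 * 4 nodes, 32 cores -> 4 nodes.
 */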
371
372 /*
 373 * Decide the number of cores in a core complex with the given nr_cores, using
 374 * the following constants: MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
 375 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible.
 376 * The L3 cache is shared across all cores in a core complex, so this also
 377 * tells us how many cores are sharing the L3 cache.
378 */
379 static int cores_in_core_complex(int nr_cores)
380 {
381 int nodes;
382
383 /* Check if we can fit all the cores in one core complex */
384 if (nr_cores <= MAX_CORES_IN_CCX) {
385 return nr_cores;
386 }
387 /* Get the number of nodes required to build this config */
388 nodes = nodes_in_socket(nr_cores);
389
390 /*
 391 * Divide the cores across all the core complexes.
 392 * Return the rounded-up value.
393 */
394 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
395 }
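
/*
 * E.g. nr_cores = 4 fits in a single core complex; nr_cores = 12 needs 2
 * nodes, so DIV_ROUND_UP(12, 2 * MAX_CCX) = 3 cores per complex; nr_cores =
 * 16 gives 4 cores per complex.
 */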
396
397 /* Encode cache info for CPUID[8000001D] */
398 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
399 uint32_t *eax, uint32_t *ebx,
400 uint32_t *ecx, uint32_t *edx)
401 {
402 uint32_t l3_cores;
403 assert(cache->size == cache->line_size * cache->associativity *
404 cache->partitions * cache->sets);
405
406 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
407 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
408
409 /* L3 is shared among multiple cores */
410 if (cache->level == 3) {
411 l3_cores = cores_in_core_complex(cs->nr_cores);
412 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
413 } else {
414 *eax |= ((cs->nr_threads - 1) << 14);
415 }
416
417 assert(cache->line_size > 0);
418 assert(cache->partitions > 0);
419 assert(cache->associativity > 0);
420 /* We don't implement fully-associative caches */
421 assert(cache->associativity < cache->sets);
422 *ebx = (cache->line_size - 1) |
423 ((cache->partitions - 1) << 12) |
424 ((cache->associativity - 1) << 22);
425
426 assert(cache->sets > 0);
427 *ecx = cache->sets - 1;
428
429 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
430 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
431 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
432 }
433
434 /* Data structure to hold the configuration info for a given core index */
435 struct core_topology {
436 /* core complex id of the current core index */
437 int ccx_id;
438 /*
 439 * Adjusted core index for this core in the topology.
 440 * This can be 0, 1, 2 or 3, with a maximum of 4 cores in a core complex.
441 */
442 int core_id;
443 /* Node id for this core index */
444 int node_id;
445 /* Number of nodes in this config */
446 int num_nodes;
447 };
448
449 /*
 450 * Build the configuration to closely match the EPYC hardware, using the EPYC
 451 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
 452 * for now. This could change in the future.
453 * nr_cores : Total number of cores in the config
454 * core_id : Core index of the current CPU
455 * topo : Data structure to hold all the config info for this core index
456 */
457 static void build_core_topology(int nr_cores, int core_id,
458 struct core_topology *topo)
459 {
460 int nodes, cores_in_ccx;
461
462 /* First get the number of nodes required */
463 nodes = nodes_in_socket(nr_cores);
464
465 cores_in_ccx = cores_in_core_complex(nr_cores);
466
467 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
468 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
469 topo->core_id = core_id % cores_in_ccx;
470 topo->num_nodes = nodes;
471 }
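
/*
 * Worked example: nr_cores = 16 and core_id = 10 gives nodes = 2 and
 * cores_in_ccx = 4, so node_id = 10 / 8 = 1, ccx_id = (10 % 8) / 4 = 0 and
 * core_id = 10 % 4 = 2.
 */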
472
 473 /* Encode topology info for CPUID[8000001E] */
474 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
475 uint32_t *eax, uint32_t *ebx,
476 uint32_t *ecx, uint32_t *edx)
477 {
478 struct core_topology topo = {0};
479 unsigned long nodes;
480 int shift;
481
482 build_core_topology(cs->nr_cores, cpu->core_id, &topo);
483 *eax = cpu->apic_id;
484 /*
485 * CPUID_Fn8000001E_EBX
486 * 31:16 Reserved
487 * 15:8 Threads per core (The number of threads per core is
488 * Threads per core + 1)
489 * 7:0 Core id (see bit decoding below)
490 * SMT:
491 * 4:3 node id
492 * 2 Core complex id
493 * 1:0 Core id
494 * Non SMT:
495 * 5:4 node id
496 * 3 Core complex id
497 * 1:0 Core id
498 */
499 if (cs->nr_threads - 1) {
500 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
501 (topo.ccx_id << 2) | topo.core_id;
502 } else {
503 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
504 }
505 /*
506 * CPUID_Fn8000001E_ECX
507 * 31:11 Reserved
508 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
509 * 7:0 Node id (see bit decoding below)
510 * 2 Socket id
511 * 1:0 Node id
512 */
513 if (topo.num_nodes <= 4) {
514 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
515 topo.node_id;
516 } else {
517 /*
 518 * Node id fix up. Actual hardware supports up to 4 nodes. But with
 519 * more than 32 cores, we may end up with more than 4 nodes.
 520 * Node id is a combination of socket id and node id. The only requirement
 521 * here is that this number should be unique across the system.
 522 * Shift the socket id to accommodate more nodes. We don't expect both
 523 * socket id and node id to be big numbers at the same time. This is not
 524 * an ideal config but we need to support it. The max number of nodes we
 525 * can have is 32 (255/8) with 8 cores per node and 255 max cores. We only
 526 * need 5 bits for nodes. Find the leftmost set bit to represent the total
 527 * number of nodes. find_last_bit returns the last set bit (0-based). Left-
 528 * shift (+1) the socket id to represent all the nodes.
529 */
530 nodes = topo.num_nodes - 1;
531 shift = find_last_bit(&nodes, 8);
532 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
533 topo.node_id;
534 }
535 *edx = 0;
536 }
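
/*
 * Example: with 8 cores, 1 thread per core, socket_id = 0 and core_id = 5,
 * build_core_topology() gives node_id = 0, ccx_id = 1, core_id = 1, so
 * EBX = (0 << 4) | (1 << 3) | 1 = 0x9 and ECX = ((1 - 1) << 8) | 0 = 0.
 */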
537
538 /*
539 * Definitions of the hardcoded cache entries we expose:
540 * These are legacy cache values. If there is a need to change any
 541 * of these values, please use builtin_x86_defs.
542 */
543
544 /* L1 data cache: */
545 static CPUCacheInfo legacy_l1d_cache = {
546 .type = DATA_CACHE,
547 .level = 1,
548 .size = 32 * KiB,
549 .self_init = 1,
550 .line_size = 64,
551 .associativity = 8,
552 .sets = 64,
553 .partitions = 1,
554 .no_invd_sharing = true,
555 };
556
557 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
558 static CPUCacheInfo legacy_l1d_cache_amd = {
559 .type = DATA_CACHE,
560 .level = 1,
561 .size = 64 * KiB,
562 .self_init = 1,
563 .line_size = 64,
564 .associativity = 2,
565 .sets = 512,
566 .partitions = 1,
567 .lines_per_tag = 1,
568 .no_invd_sharing = true,
569 };
570
571 /* L1 instruction cache: */
572 static CPUCacheInfo legacy_l1i_cache = {
573 .type = INSTRUCTION_CACHE,
574 .level = 1,
575 .size = 32 * KiB,
576 .self_init = 1,
577 .line_size = 64,
578 .associativity = 8,
579 .sets = 64,
580 .partitions = 1,
581 .no_invd_sharing = true,
582 };
583
584 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
585 static CPUCacheInfo legacy_l1i_cache_amd = {
586 .type = INSTRUCTION_CACHE,
587 .level = 1,
588 .size = 64 * KiB,
589 .self_init = 1,
590 .line_size = 64,
591 .associativity = 2,
592 .sets = 512,
593 .partitions = 1,
594 .lines_per_tag = 1,
595 .no_invd_sharing = true,
596 };
597
598 /* Level 2 unified cache: */
599 static CPUCacheInfo legacy_l2_cache = {
600 .type = UNIFIED_CACHE,
601 .level = 2,
602 .size = 4 * MiB,
603 .self_init = 1,
604 .line_size = 64,
605 .associativity = 16,
606 .sets = 4096,
607 .partitions = 1,
608 .no_invd_sharing = true,
609 };
610
611 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
612 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
613 .type = UNIFIED_CACHE,
614 .level = 2,
615 .size = 2 * MiB,
616 .line_size = 64,
617 .associativity = 8,
618 };
619
620
621 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
622 static CPUCacheInfo legacy_l2_cache_amd = {
623 .type = UNIFIED_CACHE,
624 .level = 2,
625 .size = 512 * KiB,
626 .line_size = 64,
627 .lines_per_tag = 1,
628 .associativity = 16,
629 .sets = 512,
630 .partitions = 1,
631 };
632
633 /* Level 3 unified cache: */
634 static CPUCacheInfo legacy_l3_cache = {
635 .type = UNIFIED_CACHE,
636 .level = 3,
637 .size = 16 * MiB,
638 .line_size = 64,
639 .associativity = 16,
640 .sets = 16384,
641 .partitions = 1,
642 .lines_per_tag = 1,
643 .self_init = true,
644 .inclusive = true,
645 .complex_indexing = true,
646 };
647
648 /* TLB definitions: */
649
650 #define L1_DTLB_2M_ASSOC 1
651 #define L1_DTLB_2M_ENTRIES 255
652 #define L1_DTLB_4K_ASSOC 1
653 #define L1_DTLB_4K_ENTRIES 255
654
655 #define L1_ITLB_2M_ASSOC 1
656 #define L1_ITLB_2M_ENTRIES 255
657 #define L1_ITLB_4K_ASSOC 1
658 #define L1_ITLB_4K_ENTRIES 255
659
660 #define L2_DTLB_2M_ASSOC 0 /* disabled */
661 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
662 #define L2_DTLB_4K_ASSOC 4
663 #define L2_DTLB_4K_ENTRIES 512
664
665 #define L2_ITLB_2M_ASSOC 0 /* disabled */
666 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
667 #define L2_ITLB_4K_ASSOC 4
668 #define L2_ITLB_4K_ENTRIES 512
669
670 /* CPUID Leaf 0x14 constants: */
671 #define INTEL_PT_MAX_SUBLEAF 0x1
672 /*
673 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
674 * MSR can be accessed;
675 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
676 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
677 * of Intel PT MSRs across warm reset;
678 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
679 */
680 #define INTEL_PT_MINIMAL_EBX 0xf
681 /*
682 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
683 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
684 * accessed;
685 * bit[01]: ToPA tables can hold any number of output entries, up to the
686 * maximum allowed by the MaskOrTableOffset field of
687 * IA32_RTIT_OUTPUT_MASK_PTRS;
688 * bit[02]: Support Single-Range Output scheme;
689 */
690 #define INTEL_PT_MINIMAL_ECX 0x7
691 /* generated packets which contain IP payloads have LIP values */
692 #define INTEL_PT_IP_LIP (1 << 31)
693 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
694 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
695 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
696 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
697 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
698
699 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
700 uint32_t vendor2, uint32_t vendor3)
701 {
702 int i;
703 for (i = 0; i < 4; i++) {
704 dst[i] = vendor1 >> (8 * i);
705 dst[i + 4] = vendor2 >> (8 * i);
706 dst[i + 8] = vendor3 >> (8 * i);
707 }
708 dst[CPUID_VENDOR_SZ] = '\0';
709 }
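
/*
 * E.g. the Intel vendor words EBX = 0x756e6547, EDX = 0x49656e69 and
 * ECX = 0x6c65746e decode byte by byte (little-endian) to "GenuineIntel".
 */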
710
711 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
712 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
713 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
714 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
715 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
716 CPUID_PSE36 | CPUID_FXSR)
717 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
718 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
719 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
720 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
721 CPUID_PAE | CPUID_SEP | CPUID_APIC)
722
723 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
724 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
725 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
726 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
727 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
728 /* partly implemented:
729 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
730 /* missing:
731 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
732 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
733 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
734 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
735 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
736 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
737 CPUID_EXT_RDRAND)
738 /* missing:
739 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
740 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
741 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
742 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
743 CPUID_EXT_F16C */
744
745 #ifdef TARGET_X86_64
746 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
747 #else
748 #define TCG_EXT2_X86_64_FEATURES 0
749 #endif
750
751 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
752 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
753 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
754 TCG_EXT2_X86_64_FEATURES)
755 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
756 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
757 #define TCG_EXT4_FEATURES 0
758 #define TCG_SVM_FEATURES CPUID_SVM_NPT
759 #define TCG_KVM_FEATURES 0
760 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
761 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
762 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
763 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
764 CPUID_7_0_EBX_ERMS)
765 /* missing:
766 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
767 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
768 CPUID_7_0_EBX_RDSEED */
769 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
770 /* CPUID_7_0_ECX_OSPKE is dynamic */ \
771 CPUID_7_0_ECX_LA57)
772 #define TCG_7_0_EDX_FEATURES 0
773 #define TCG_APM_FEATURES 0
774 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
775 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
776 /* missing:
777 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
778
779 typedef enum FeatureWordType {
780 CPUID_FEATURE_WORD,
781 MSR_FEATURE_WORD,
782 } FeatureWordType;
783
784 typedef struct FeatureWordInfo {
785 FeatureWordType type;
 786 /* feature flag names are taken from "Intel Processor Identification and
787 * the CPUID Instruction" and AMD's "CPUID Specification".
788 * In cases of disagreement between feature naming conventions,
789 * aliases may be added.
790 */
791 const char *feat_names[32];
792 union {
793 /* If type==CPUID_FEATURE_WORD */
794 struct {
795 uint32_t eax; /* Input EAX for CPUID */
796 bool needs_ecx; /* CPUID instruction uses ECX as input */
797 uint32_t ecx; /* Input ECX value for CPUID */
798 int reg; /* output register (R_* constant) */
799 } cpuid;
800 /* If type==MSR_FEATURE_WORD */
801 struct {
802 uint32_t index;
 803 struct { /* CPUID leaf that enumerates this MSR */
804 FeatureWord cpuid_class;
805 uint32_t cpuid_flag;
806 } cpuid_dep;
807 } msr;
808 };
809 uint32_t tcg_features; /* Feature flags supported by TCG */
810 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
811 uint32_t migratable_flags; /* Feature flags known to be migratable */
812 /* Features that shouldn't be auto-enabled by "-cpu host" */
813 uint32_t no_autoenable_flags;
814 } FeatureWordInfo;
815
816 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
817 [FEAT_1_EDX] = {
818 .type = CPUID_FEATURE_WORD,
819 .feat_names = {
820 "fpu", "vme", "de", "pse",
821 "tsc", "msr", "pae", "mce",
822 "cx8", "apic", NULL, "sep",
823 "mtrr", "pge", "mca", "cmov",
824 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
825 NULL, "ds" /* Intel dts */, "acpi", "mmx",
826 "fxsr", "sse", "sse2", "ss",
827 "ht" /* Intel htt */, "tm", "ia64", "pbe",
828 },
829 .cpuid = {.eax = 1, .reg = R_EDX, },
830 .tcg_features = TCG_FEATURES,
831 },
832 [FEAT_1_ECX] = {
833 .type = CPUID_FEATURE_WORD,
834 .feat_names = {
835 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
836 "ds-cpl", "vmx", "smx", "est",
837 "tm2", "ssse3", "cid", NULL,
838 "fma", "cx16", "xtpr", "pdcm",
839 NULL, "pcid", "dca", "sse4.1",
840 "sse4.2", "x2apic", "movbe", "popcnt",
841 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
842 "avx", "f16c", "rdrand", "hypervisor",
843 },
844 .cpuid = { .eax = 1, .reg = R_ECX, },
845 .tcg_features = TCG_EXT_FEATURES,
846 },
 847 /* Feature names that are already defined in feature_name[] but
 848 * are set in CPUID[8000_0001].EDX on AMD CPUs don't have their
 849 * names in feat_names below. They are copied automatically
 850 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
851 */
852 [FEAT_8000_0001_EDX] = {
853 .type = CPUID_FEATURE_WORD,
854 .feat_names = {
855 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
856 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
857 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
858 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
859 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
860 "nx", NULL, "mmxext", NULL /* mmx */,
861 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
862 NULL, "lm", "3dnowext", "3dnow",
863 },
864 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
865 .tcg_features = TCG_EXT2_FEATURES,
866 },
867 [FEAT_8000_0001_ECX] = {
868 .type = CPUID_FEATURE_WORD,
869 .feat_names = {
870 "lahf-lm", "cmp-legacy", "svm", "extapic",
871 "cr8legacy", "abm", "sse4a", "misalignsse",
872 "3dnowprefetch", "osvw", "ibs", "xop",
873 "skinit", "wdt", NULL, "lwp",
874 "fma4", "tce", NULL, "nodeid-msr",
875 NULL, "tbm", "topoext", "perfctr-core",
876 "perfctr-nb", NULL, NULL, NULL,
877 NULL, NULL, NULL, NULL,
878 },
879 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
880 .tcg_features = TCG_EXT3_FEATURES,
881 /*
882 * TOPOEXT is always allowed but can't be enabled blindly by
883 * "-cpu host", as it requires consistent cache topology info
884 * to be provided so it doesn't confuse guests.
885 */
886 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
887 },
888 [FEAT_C000_0001_EDX] = {
889 .type = CPUID_FEATURE_WORD,
890 .feat_names = {
891 NULL, NULL, "xstore", "xstore-en",
892 NULL, NULL, "xcrypt", "xcrypt-en",
893 "ace2", "ace2-en", "phe", "phe-en",
894 "pmm", "pmm-en", NULL, NULL,
895 NULL, NULL, NULL, NULL,
896 NULL, NULL, NULL, NULL,
897 NULL, NULL, NULL, NULL,
898 NULL, NULL, NULL, NULL,
899 },
900 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
901 .tcg_features = TCG_EXT4_FEATURES,
902 },
903 [FEAT_KVM] = {
904 .type = CPUID_FEATURE_WORD,
905 .feat_names = {
906 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
907 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
908 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
909 NULL, NULL, NULL, NULL,
910 NULL, NULL, NULL, NULL,
911 NULL, NULL, NULL, NULL,
912 "kvmclock-stable-bit", NULL, NULL, NULL,
913 NULL, NULL, NULL, NULL,
914 },
915 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
916 .tcg_features = TCG_KVM_FEATURES,
917 },
918 [FEAT_KVM_HINTS] = {
919 .type = CPUID_FEATURE_WORD,
920 .feat_names = {
921 "kvm-hint-dedicated", NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 NULL, NULL, NULL, NULL,
924 NULL, NULL, NULL, NULL,
925 NULL, NULL, NULL, NULL,
926 NULL, NULL, NULL, NULL,
927 NULL, NULL, NULL, NULL,
928 NULL, NULL, NULL, NULL,
929 },
930 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
931 .tcg_features = TCG_KVM_FEATURES,
932 /*
 933 * KVM hints aren't auto-enabled by -cpu host; they need to be
 934 * explicitly enabled on the command line.
935 */
936 .no_autoenable_flags = ~0U,
937 },
938 /*
 939 * .feat_names are commented out for Hyper-V enlightenments because we
 940 * don't want to have two different ways of enabling them on the QEMU
 941 * command line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...)
 942 * require enabling several feature bits simultaneously; exposing these
 943 * bits individually may just confuse guests.
944 */
945 [FEAT_HYPERV_EAX] = {
946 .type = CPUID_FEATURE_WORD,
947 .feat_names = {
948 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
949 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
950 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
951 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
952 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
953 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
954 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
955 NULL, NULL,
956 NULL, NULL, NULL, NULL,
957 NULL, NULL, NULL, NULL,
958 NULL, NULL, NULL, NULL,
959 NULL, NULL, NULL, NULL,
960 },
961 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
962 },
963 [FEAT_HYPERV_EBX] = {
964 .type = CPUID_FEATURE_WORD,
965 .feat_names = {
966 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
967 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
968 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
969 NULL /* hv_create_port */, NULL /* hv_connect_port */,
970 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
971 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
972 NULL, NULL,
973 NULL, NULL, NULL, NULL,
974 NULL, NULL, NULL, NULL,
975 NULL, NULL, NULL, NULL,
976 NULL, NULL, NULL, NULL,
977 },
978 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
979 },
980 [FEAT_HYPERV_EDX] = {
981 .type = CPUID_FEATURE_WORD,
982 .feat_names = {
983 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
984 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
985 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
986 NULL, NULL,
987 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
988 NULL, NULL, NULL, NULL,
989 NULL, NULL, NULL, NULL,
990 NULL, NULL, NULL, NULL,
991 NULL, NULL, NULL, NULL,
992 NULL, NULL, NULL, NULL,
993 },
994 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
995 },
996 [FEAT_HV_RECOMM_EAX] = {
997 .type = CPUID_FEATURE_WORD,
998 .feat_names = {
999 NULL /* hv_recommend_pv_as_switch */,
1000 NULL /* hv_recommend_pv_tlbflush_local */,
1001 NULL /* hv_recommend_pv_tlbflush_remote */,
1002 NULL /* hv_recommend_msr_apic_access */,
1003 NULL /* hv_recommend_msr_reset */,
1004 NULL /* hv_recommend_relaxed_timing */,
1005 NULL /* hv_recommend_dma_remapping */,
1006 NULL /* hv_recommend_int_remapping */,
1007 NULL /* hv_recommend_x2apic_msrs */,
1008 NULL /* hv_recommend_autoeoi_deprecation */,
1009 NULL /* hv_recommend_pv_ipi */,
1010 NULL /* hv_recommend_ex_hypercalls */,
1011 NULL /* hv_hypervisor_is_nested */,
1012 NULL /* hv_recommend_int_mbec */,
1013 NULL /* hv_recommend_evmcs */,
1014 NULL,
1015 NULL, NULL, NULL, NULL,
1016 NULL, NULL, NULL, NULL,
1017 NULL, NULL, NULL, NULL,
1018 NULL, NULL, NULL, NULL,
1019 },
1020 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
1021 },
1022 [FEAT_HV_NESTED_EAX] = {
1023 .type = CPUID_FEATURE_WORD,
1024 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
1025 },
1026 [FEAT_SVM] = {
1027 .type = CPUID_FEATURE_WORD,
1028 .feat_names = {
1029 "npt", "lbrv", "svm-lock", "nrip-save",
1030 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1031 NULL, NULL, "pause-filter", NULL,
1032 "pfthreshold", NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL,
1034 NULL, NULL, NULL, NULL,
1035 NULL, NULL, NULL, NULL,
1036 NULL, NULL, NULL, NULL,
1037 },
1038 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
1039 .tcg_features = TCG_SVM_FEATURES,
1040 },
1041 [FEAT_7_0_EBX] = {
1042 .type = CPUID_FEATURE_WORD,
1043 .feat_names = {
1044 "fsgsbase", "tsc-adjust", NULL, "bmi1",
1045 "hle", "avx2", NULL, "smep",
1046 "bmi2", "erms", "invpcid", "rtm",
1047 NULL, NULL, "mpx", NULL,
1048 "avx512f", "avx512dq", "rdseed", "adx",
1049 "smap", "avx512ifma", "pcommit", "clflushopt",
1050 "clwb", "intel-pt", "avx512pf", "avx512er",
1051 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1052 },
1053 .cpuid = {
1054 .eax = 7,
1055 .needs_ecx = true, .ecx = 0,
1056 .reg = R_EBX,
1057 },
1058 .tcg_features = TCG_7_0_EBX_FEATURES,
1059 },
1060 [FEAT_7_0_ECX] = {
1061 .type = CPUID_FEATURE_WORD,
1062 .feat_names = {
1063 NULL, "avx512vbmi", "umip", "pku",
1064 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
1065 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1066 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
1067 "la57", NULL, NULL, NULL,
1068 NULL, NULL, "rdpid", NULL,
1069 NULL, "cldemote", NULL, "movdiri",
1070 "movdir64b", NULL, NULL, NULL,
1071 },
1072 .cpuid = {
1073 .eax = 7,
1074 .needs_ecx = true, .ecx = 0,
1075 .reg = R_ECX,
1076 },
1077 .tcg_features = TCG_7_0_ECX_FEATURES,
1078 },
1079 [FEAT_7_0_EDX] = {
1080 .type = CPUID_FEATURE_WORD,
1081 .feat_names = {
1082 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1083 NULL, NULL, NULL, NULL,
1084 NULL, NULL, "md-clear", NULL,
1085 NULL, NULL, NULL, NULL,
1086 NULL, NULL, NULL, NULL,
1087 NULL, NULL, NULL, NULL,
1088 NULL, NULL, "spec-ctrl", "stibp",
1089 NULL, "arch-capabilities", "core-capability", "ssbd",
1090 },
1091 .cpuid = {
1092 .eax = 7,
1093 .needs_ecx = true, .ecx = 0,
1094 .reg = R_EDX,
1095 },
1096 .tcg_features = TCG_7_0_EDX_FEATURES,
1097 },
1098 [FEAT_8000_0007_EDX] = {
1099 .type = CPUID_FEATURE_WORD,
1100 .feat_names = {
1101 NULL, NULL, NULL, NULL,
1102 NULL, NULL, NULL, NULL,
1103 "invtsc", NULL, NULL, NULL,
1104 NULL, NULL, NULL, NULL,
1105 NULL, NULL, NULL, NULL,
1106 NULL, NULL, NULL, NULL,
1107 NULL, NULL, NULL, NULL,
1108 NULL, NULL, NULL, NULL,
1109 },
1110 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1111 .tcg_features = TCG_APM_FEATURES,
1112 .unmigratable_flags = CPUID_APM_INVTSC,
1113 },
1114 [FEAT_8000_0008_EBX] = {
1115 .type = CPUID_FEATURE_WORD,
1116 .feat_names = {
1117 NULL, NULL, NULL, NULL,
1118 NULL, NULL, NULL, NULL,
1119 NULL, "wbnoinvd", NULL, NULL,
1120 "ibpb", NULL, NULL, NULL,
1121 NULL, NULL, NULL, NULL,
1122 NULL, NULL, NULL, NULL,
1123 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1124 NULL, NULL, NULL, NULL,
1125 },
1126 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1127 .tcg_features = 0,
1128 .unmigratable_flags = 0,
1129 },
1130 [FEAT_XSAVE] = {
1131 .type = CPUID_FEATURE_WORD,
1132 .feat_names = {
1133 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1134 NULL, NULL, NULL, NULL,
1135 NULL, NULL, NULL, NULL,
1136 NULL, NULL, NULL, NULL,
1137 NULL, NULL, NULL, NULL,
1138 NULL, NULL, NULL, NULL,
1139 NULL, NULL, NULL, NULL,
1140 NULL, NULL, NULL, NULL,
1141 },
1142 .cpuid = {
1143 .eax = 0xd,
1144 .needs_ecx = true, .ecx = 1,
1145 .reg = R_EAX,
1146 },
1147 .tcg_features = TCG_XSAVE_FEATURES,
1148 },
1149 [FEAT_6_EAX] = {
1150 .type = CPUID_FEATURE_WORD,
1151 .feat_names = {
1152 NULL, NULL, "arat", NULL,
1153 NULL, NULL, NULL, NULL,
1154 NULL, NULL, NULL, NULL,
1155 NULL, NULL, NULL, NULL,
1156 NULL, NULL, NULL, NULL,
1157 NULL, NULL, NULL, NULL,
1158 NULL, NULL, NULL, NULL,
1159 NULL, NULL, NULL, NULL,
1160 },
1161 .cpuid = { .eax = 6, .reg = R_EAX, },
1162 .tcg_features = TCG_6_EAX_FEATURES,
1163 },
1164 [FEAT_XSAVE_COMP_LO] = {
1165 .type = CPUID_FEATURE_WORD,
1166 .cpuid = {
1167 .eax = 0xD,
1168 .needs_ecx = true, .ecx = 0,
1169 .reg = R_EAX,
1170 },
1171 .tcg_features = ~0U,
1172 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1173 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1174 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1175 XSTATE_PKRU_MASK,
1176 },
1177 [FEAT_XSAVE_COMP_HI] = {
1178 .type = CPUID_FEATURE_WORD,
1179 .cpuid = {
1180 .eax = 0xD,
1181 .needs_ecx = true, .ecx = 0,
1182 .reg = R_EDX,
1183 },
1184 .tcg_features = ~0U,
1185 },
 1186 /* Below are MSR-exposed features */
1187 [FEAT_ARCH_CAPABILITIES] = {
1188 .type = MSR_FEATURE_WORD,
1189 .feat_names = {
1190 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1191 "ssb-no", "mds-no", NULL, NULL,
1192 NULL, NULL, NULL, NULL,
1193 NULL, NULL, NULL, NULL,
1194 NULL, NULL, NULL, NULL,
1195 NULL, NULL, NULL, NULL,
1196 NULL, NULL, NULL, NULL,
1197 NULL, NULL, NULL, NULL,
1198 },
1199 .msr = {
1200 .index = MSR_IA32_ARCH_CAPABILITIES,
1201 .cpuid_dep = {
1202 FEAT_7_0_EDX,
1203 CPUID_7_0_EDX_ARCH_CAPABILITIES
1204 }
1205 },
1206 },
1207 [FEAT_CORE_CAPABILITY] = {
1208 .type = MSR_FEATURE_WORD,
1209 .feat_names = {
1210 NULL, NULL, NULL, NULL,
1211 NULL, "split-lock-detect", NULL, NULL,
1212 NULL, NULL, NULL, NULL,
1213 NULL, NULL, NULL, NULL,
1214 NULL, NULL, NULL, NULL,
1215 NULL, NULL, NULL, NULL,
1216 NULL, NULL, NULL, NULL,
1217 NULL, NULL, NULL, NULL,
1218 },
1219 .msr = {
1220 .index = MSR_IA32_CORE_CAPABILITY,
1221 .cpuid_dep = {
1222 FEAT_7_0_EDX,
1223 CPUID_7_0_EDX_CORE_CAPABILITY,
1224 },
1225 },
1226 },
1227 };
1228
1229 typedef struct X86RegisterInfo32 {
1230 /* Name of register */
1231 const char *name;
 1232 /* QAPI enum value for the register */
1233 X86CPURegister32 qapi_enum;
1234 } X86RegisterInfo32;
1235
1236 #define REGISTER(reg) \
1237 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1238 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
1239 REGISTER(EAX),
1240 REGISTER(ECX),
1241 REGISTER(EDX),
1242 REGISTER(EBX),
1243 REGISTER(ESP),
1244 REGISTER(EBP),
1245 REGISTER(ESI),
1246 REGISTER(EDI),
1247 };
1248 #undef REGISTER
1249
1250 typedef struct ExtSaveArea {
1251 uint32_t feature, bits;
1252 uint32_t offset, size;
1253 } ExtSaveArea;
1254
1255 static const ExtSaveArea x86_ext_save_areas[] = {
1256 [XSTATE_FP_BIT] = {
1257 /* x87 FP state component is always enabled if XSAVE is supported */
1258 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1259 /* x87 state is in the legacy region of the XSAVE area */
1260 .offset = 0,
1261 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1262 },
1263 [XSTATE_SSE_BIT] = {
1264 /* SSE state component is always enabled if XSAVE is supported */
1265 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1266 /* SSE state is in the legacy region of the XSAVE area */
1267 .offset = 0,
1268 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1269 },
1270 [XSTATE_YMM_BIT] =
1271 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1272 .offset = offsetof(X86XSaveArea, avx_state),
1273 .size = sizeof(XSaveAVX) },
1274 [XSTATE_BNDREGS_BIT] =
1275 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1276 .offset = offsetof(X86XSaveArea, bndreg_state),
1277 .size = sizeof(XSaveBNDREG) },
1278 [XSTATE_BNDCSR_BIT] =
1279 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1280 .offset = offsetof(X86XSaveArea, bndcsr_state),
1281 .size = sizeof(XSaveBNDCSR) },
1282 [XSTATE_OPMASK_BIT] =
1283 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1284 .offset = offsetof(X86XSaveArea, opmask_state),
1285 .size = sizeof(XSaveOpmask) },
1286 [XSTATE_ZMM_Hi256_BIT] =
1287 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1288 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1289 .size = sizeof(XSaveZMM_Hi256) },
1290 [XSTATE_Hi16_ZMM_BIT] =
1291 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1292 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1293 .size = sizeof(XSaveHi16_ZMM) },
1294 [XSTATE_PKRU_BIT] =
1295 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1296 .offset = offsetof(X86XSaveArea, pkru_state),
1297 .size = sizeof(XSavePKRU) },
1298 };
1299
1300 static uint32_t xsave_area_size(uint64_t mask)
1301 {
1302 int i;
1303 uint64_t ret = 0;
1304
1305 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1306 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1307 if ((mask >> i) & 1) {
1308 ret = MAX(ret, esa->offset + esa->size);
1309 }
1310 }
1311 return ret;
1312 }
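
/*
 * E.g. a mask of only XSTATE_FP_MASK | XSTATE_SSE_MASK yields
 * sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), since both legacy
 * components in the table above start at offset 0 of the XSAVE area.
 */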
1313
1314 static inline bool accel_uses_host_cpuid(void)
1315 {
1316 return kvm_enabled() || hvf_enabled();
1317 }
1318
1319 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1320 {
1321 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1322 cpu->env.features[FEAT_XSAVE_COMP_LO];
1323 }
1324
1325 const char *get_register_name_32(unsigned int reg)
1326 {
1327 if (reg >= CPU_NB_REGS32) {
1328 return NULL;
1329 }
1330 return x86_reg_info_32[reg].name;
1331 }
1332
1333 /*
1334 * Returns the set of feature flags that are supported and migratable by
1335 * QEMU, for a given FeatureWord.
1336 */
1337 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1338 {
1339 FeatureWordInfo *wi = &feature_word_info[w];
1340 uint32_t r = 0;
1341 int i;
1342
1343 for (i = 0; i < 32; i++) {
1344 uint32_t f = 1U << i;
1345
1346 /* If the feature name is known, it is implicitly considered migratable,
1347 * unless it is explicitly set in unmigratable_flags */
1348 if ((wi->migratable_flags & f) ||
1349 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1350 r |= f;
1351 }
1352 }
1353 return r;
1354 }
1355
1356 void host_cpuid(uint32_t function, uint32_t count,
1357 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1358 {
1359 uint32_t vec[4];
1360
1361 #ifdef __x86_64__
1362 asm volatile("cpuid"
1363 : "=a"(vec[0]), "=b"(vec[1]),
1364 "=c"(vec[2]), "=d"(vec[3])
1365 : "0"(function), "c"(count) : "cc");
1366 #elif defined(__i386__)
1367 asm volatile("pusha \n\t"
1368 "cpuid \n\t"
1369 "mov %%eax, 0(%2) \n\t"
1370 "mov %%ebx, 4(%2) \n\t"
1371 "mov %%ecx, 8(%2) \n\t"
1372 "mov %%edx, 12(%2) \n\t"
1373 "popa"
1374 : : "a"(function), "c"(count), "S"(vec)
1375 : "memory", "cc");
1376 #else
1377 abort();
1378 #endif
1379
1380 if (eax)
1381 *eax = vec[0];
1382 if (ebx)
1383 *ebx = vec[1];
1384 if (ecx)
1385 *ecx = vec[2];
1386 if (edx)
1387 *edx = vec[3];
1388 }
1389
1390 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1391 {
1392 uint32_t eax, ebx, ecx, edx;
1393
1394 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1395 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1396
1397 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1398 if (family) {
1399 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1400 }
1401 if (model) {
1402 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1403 }
1404 if (stepping) {
1405 *stepping = eax & 0x0F;
1406 }
1407 }
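
/*
 * E.g. a CPUID[1].EAX signature of 0x000506E3 decodes to family 6
 * (0x6 + 0x00), model 94 (0xE | (0x5 << 4)) and stepping 3.
 */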
1408
1409 /* CPU class name definitions: */
1410
1411 /* Return type name for a given CPU model name
1412 * Caller is responsible for freeing the returned string.
1413 */
1414 static char *x86_cpu_type_name(const char *model_name)
1415 {
1416 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
1417 }
1418
1419 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1420 {
1421 ObjectClass *oc;
1422 char *typename = x86_cpu_type_name(cpu_model);
1423 oc = object_class_by_name(typename);
1424 g_free(typename);
1425 return oc;
1426 }
1427
1428 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1429 {
1430 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1431 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1432 return g_strndup(class_name,
1433 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1434 }
1435
1436 struct X86CPUDefinition {
1437 const char *name;
1438 uint32_t level;
1439 uint32_t xlevel;
 1440 /* vendor is a zero-terminated, 12-character ASCII string */
1441 char vendor[CPUID_VENDOR_SZ + 1];
1442 int family;
1443 int model;
1444 int stepping;
1445 FeatureWordArray features;
1446 const char *model_id;
1447 CPUCaches *cache_info;
1448 };
1449
1450 static CPUCaches epyc_cache_info = {
1451 .l1d_cache = &(CPUCacheInfo) {
1452 .type = DATA_CACHE,
1453 .level = 1,
1454 .size = 32 * KiB,
1455 .line_size = 64,
1456 .associativity = 8,
1457 .partitions = 1,
1458 .sets = 64,
1459 .lines_per_tag = 1,
1460 .self_init = 1,
1461 .no_invd_sharing = true,
1462 },
1463 .l1i_cache = &(CPUCacheInfo) {
1464 .type = INSTRUCTION_CACHE,
1465 .level = 1,
1466 .size = 64 * KiB,
1467 .line_size = 64,
1468 .associativity = 4,
1469 .partitions = 1,
1470 .sets = 256,
1471 .lines_per_tag = 1,
1472 .self_init = 1,
1473 .no_invd_sharing = true,
1474 },
1475 .l2_cache = &(CPUCacheInfo) {
1476 .type = UNIFIED_CACHE,
1477 .level = 2,
1478 .size = 512 * KiB,
1479 .line_size = 64,
1480 .associativity = 8,
1481 .partitions = 1,
1482 .sets = 1024,
1483 .lines_per_tag = 1,
1484 },
1485 .l3_cache = &(CPUCacheInfo) {
1486 .type = UNIFIED_CACHE,
1487 .level = 3,
1488 .size = 8 * MiB,
1489 .line_size = 64,
1490 .associativity = 16,
1491 .partitions = 1,
1492 .sets = 8192,
1493 .lines_per_tag = 1,
1494 .self_init = true,
1495 .inclusive = true,
1496 .complex_indexing = true,
1497 },
1498 };
1499
1500 static X86CPUDefinition builtin_x86_defs[] = {
1501 {
1502 .name = "qemu64",
1503 .level = 0xd,
1504 .vendor = CPUID_VENDOR_AMD,
1505 .family = 6,
1506 .model = 6,
1507 .stepping = 3,
1508 .features[FEAT_1_EDX] =
1509 PPRO_FEATURES |
1510 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1511 CPUID_PSE36,
1512 .features[FEAT_1_ECX] =
1513 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1514 .features[FEAT_8000_0001_EDX] =
1515 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1516 .features[FEAT_8000_0001_ECX] =
1517 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1518 .xlevel = 0x8000000A,
1519 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1520 },
1521 {
1522 .name = "phenom",
1523 .level = 5,
1524 .vendor = CPUID_VENDOR_AMD,
1525 .family = 16,
1526 .model = 2,
1527 .stepping = 3,
1528 /* Missing: CPUID_HT */
1529 .features[FEAT_1_EDX] =
1530 PPRO_FEATURES |
1531 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1532 CPUID_PSE36 | CPUID_VME,
1533 .features[FEAT_1_ECX] =
1534 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1535 CPUID_EXT_POPCNT,
1536 .features[FEAT_8000_0001_EDX] =
1537 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1538 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1539 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1540 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1541 CPUID_EXT3_CR8LEG,
1542 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1543 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1544 .features[FEAT_8000_0001_ECX] =
1545 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1546 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1547 /* Missing: CPUID_SVM_LBRV */
1548 .features[FEAT_SVM] =
1549 CPUID_SVM_NPT,
1550 .xlevel = 0x8000001A,
1551 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1552 },
1553 {
1554 .name = "core2duo",
1555 .level = 10,
1556 .vendor = CPUID_VENDOR_INTEL,
1557 .family = 6,
1558 .model = 15,
1559 .stepping = 11,
1560 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1561 .features[FEAT_1_EDX] =
1562 PPRO_FEATURES |
1563 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1564 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1565 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1566 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1567 .features[FEAT_1_ECX] =
1568 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1569 CPUID_EXT_CX16,
1570 .features[FEAT_8000_0001_EDX] =
1571 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1572 .features[FEAT_8000_0001_ECX] =
1573 CPUID_EXT3_LAHF_LM,
1574 .xlevel = 0x80000008,
1575 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1576 },
1577 {
1578 .name = "kvm64",
1579 .level = 0xd,
1580 .vendor = CPUID_VENDOR_INTEL,
1581 .family = 15,
1582 .model = 6,
1583 .stepping = 1,
1584 /* Missing: CPUID_HT */
1585 .features[FEAT_1_EDX] =
1586 PPRO_FEATURES | CPUID_VME |
1587 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1588 CPUID_PSE36,
1589 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1590 .features[FEAT_1_ECX] =
1591 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1592 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1593 .features[FEAT_8000_0001_EDX] =
1594 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1595 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1596 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1597 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1598 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1599 .features[FEAT_8000_0001_ECX] =
1600 0,
1601 .xlevel = 0x80000008,
1602 .model_id = "Common KVM processor"
1603 },
1604 {
1605 .name = "qemu32",
1606 .level = 4,
1607 .vendor = CPUID_VENDOR_INTEL,
1608 .family = 6,
1609 .model = 6,
1610 .stepping = 3,
1611 .features[FEAT_1_EDX] =
1612 PPRO_FEATURES,
1613 .features[FEAT_1_ECX] =
1614 CPUID_EXT_SSE3,
1615 .xlevel = 0x80000004,
1616 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1617 },
1618 {
1619 .name = "kvm32",
1620 .level = 5,
1621 .vendor = CPUID_VENDOR_INTEL,
1622 .family = 15,
1623 .model = 6,
1624 .stepping = 1,
1625 .features[FEAT_1_EDX] =
1626 PPRO_FEATURES | CPUID_VME |
1627 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1628 .features[FEAT_1_ECX] =
1629 CPUID_EXT_SSE3,
1630 .features[FEAT_8000_0001_ECX] =
1631 0,
1632 .xlevel = 0x80000008,
1633 .model_id = "Common 32-bit KVM processor"
1634 },
1635 {
1636 .name = "coreduo",
1637 .level = 10,
1638 .vendor = CPUID_VENDOR_INTEL,
1639 .family = 6,
1640 .model = 14,
1641 .stepping = 8,
1642 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1643 .features[FEAT_1_EDX] =
1644 PPRO_FEATURES | CPUID_VME |
1645 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1646 CPUID_SS,
1647 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1648 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1649 .features[FEAT_1_ECX] =
1650 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1651 .features[FEAT_8000_0001_EDX] =
1652 CPUID_EXT2_NX,
1653 .xlevel = 0x80000008,
1654 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1655 },
1656 {
1657 .name = "486",
1658 .level = 1,
1659 .vendor = CPUID_VENDOR_INTEL,
1660 .family = 4,
1661 .model = 8,
1662 .stepping = 0,
1663 .features[FEAT_1_EDX] =
1664 I486_FEATURES,
1665 .xlevel = 0,
1666 .model_id = "",
1667 },
1668 {
1669 .name = "pentium",
1670 .level = 1,
1671 .vendor = CPUID_VENDOR_INTEL,
1672 .family = 5,
1673 .model = 4,
1674 .stepping = 3,
1675 .features[FEAT_1_EDX] =
1676 PENTIUM_FEATURES,
1677 .xlevel = 0,
1678 .model_id = "",
1679 },
1680 {
1681 .name = "pentium2",
1682 .level = 2,
1683 .vendor = CPUID_VENDOR_INTEL,
1684 .family = 6,
1685 .model = 5,
1686 .stepping = 2,
1687 .features[FEAT_1_EDX] =
1688 PENTIUM2_FEATURES,
1689 .xlevel = 0,
1690 .model_id = "",
1691 },
1692 {
1693 .name = "pentium3",
1694 .level = 3,
1695 .vendor = CPUID_VENDOR_INTEL,
1696 .family = 6,
1697 .model = 7,
1698 .stepping = 3,
1699 .features[FEAT_1_EDX] =
1700 PENTIUM3_FEATURES,
1701 .xlevel = 0,
1702 .model_id = "",
1703 },
1704 {
1705 .name = "athlon",
1706 .level = 2,
1707 .vendor = CPUID_VENDOR_AMD,
1708 .family = 6,
1709 .model = 2,
1710 .stepping = 3,
1711 .features[FEAT_1_EDX] =
1712 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1713 CPUID_MCA,
1714 .features[FEAT_8000_0001_EDX] =
1715 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1716 .xlevel = 0x80000008,
1717 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1718 },
1719 {
1720 .name = "n270",
1721 .level = 10,
1722 .vendor = CPUID_VENDOR_INTEL,
1723 .family = 6,
1724 .model = 28,
1725 .stepping = 2,
1726 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1727 .features[FEAT_1_EDX] =
1728 PPRO_FEATURES |
1729 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1730 CPUID_ACPI | CPUID_SS,
1731 /* Some CPUs lack CPUID_SEP */
1732 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1733 * CPUID_EXT_XTPR */
1734 .features[FEAT_1_ECX] =
1735 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1736 CPUID_EXT_MOVBE,
1737 .features[FEAT_8000_0001_EDX] =
1738 CPUID_EXT2_NX,
1739 .features[FEAT_8000_0001_ECX] =
1740 CPUID_EXT3_LAHF_LM,
1741 .xlevel = 0x80000008,
1742 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1743 },
1744 {
1745 .name = "Conroe",
1746 .level = 10,
1747 .vendor = CPUID_VENDOR_INTEL,
1748 .family = 6,
1749 .model = 15,
1750 .stepping = 3,
1751 .features[FEAT_1_EDX] =
1752 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1753 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1754 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1755 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1756 CPUID_DE | CPUID_FP87,
1757 .features[FEAT_1_ECX] =
1758 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1759 .features[FEAT_8000_0001_EDX] =
1760 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1761 .features[FEAT_8000_0001_ECX] =
1762 CPUID_EXT3_LAHF_LM,
1763 .xlevel = 0x80000008,
1764 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1765 },
1766 {
1767 .name = "Penryn",
1768 .level = 10,
1769 .vendor = CPUID_VENDOR_INTEL,
1770 .family = 6,
1771 .model = 23,
1772 .stepping = 3,
1773 .features[FEAT_1_EDX] =
1774 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1775 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1776 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1777 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1778 CPUID_DE | CPUID_FP87,
1779 .features[FEAT_1_ECX] =
1780 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1781 CPUID_EXT_SSE3,
1782 .features[FEAT_8000_0001_EDX] =
1783 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1784 .features[FEAT_8000_0001_ECX] =
1785 CPUID_EXT3_LAHF_LM,
1786 .xlevel = 0x80000008,
1787 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1788 },
1789 {
1790 .name = "Nehalem",
1791 .level = 11,
1792 .vendor = CPUID_VENDOR_INTEL,
1793 .family = 6,
1794 .model = 26,
1795 .stepping = 3,
1796 .features[FEAT_1_EDX] =
1797 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1798 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1799 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1800 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1801 CPUID_DE | CPUID_FP87,
1802 .features[FEAT_1_ECX] =
1803 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1804 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1805 .features[FEAT_8000_0001_EDX] =
1806 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1807 .features[FEAT_8000_0001_ECX] =
1808 CPUID_EXT3_LAHF_LM,
1809 .xlevel = 0x80000008,
1810 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1811 },
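/* The *-IBRS model variants that follow differ from their base models mainly
 * by also exposing CPUID_7_0_EDX_SPEC_CTRL, i.e. the IBRS/IBPB
 * speculation-control interface used to mitigate Spectre v2.
 */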
1812 {
1813 .name = "Nehalem-IBRS",
1814 .level = 11,
1815 .vendor = CPUID_VENDOR_INTEL,
1816 .family = 6,
1817 .model = 26,
1818 .stepping = 3,
1819 .features[FEAT_1_EDX] =
1820 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1821 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1822 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1823 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1824 CPUID_DE | CPUID_FP87,
1825 .features[FEAT_1_ECX] =
1826 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1827 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1828 .features[FEAT_7_0_EDX] =
1829 CPUID_7_0_EDX_SPEC_CTRL,
1830 .features[FEAT_8000_0001_EDX] =
1831 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1832 .features[FEAT_8000_0001_ECX] =
1833 CPUID_EXT3_LAHF_LM,
1834 .xlevel = 0x80000008,
1835 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1836 },
1837 {
1838 .name = "Westmere",
1839 .level = 11,
1840 .vendor = CPUID_VENDOR_INTEL,
1841 .family = 6,
1842 .model = 44,
1843 .stepping = 1,
1844 .features[FEAT_1_EDX] =
1845 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1846 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1847 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1848 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1849 CPUID_DE | CPUID_FP87,
1850 .features[FEAT_1_ECX] =
1851 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1852 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1853 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1854 .features[FEAT_8000_0001_EDX] =
1855 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1856 .features[FEAT_8000_0001_ECX] =
1857 CPUID_EXT3_LAHF_LM,
1858 .features[FEAT_6_EAX] =
1859 CPUID_6_EAX_ARAT,
1860 .xlevel = 0x80000008,
1861 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1862 },
1863 {
1864 .name = "Westmere-IBRS",
1865 .level = 11,
1866 .vendor = CPUID_VENDOR_INTEL,
1867 .family = 6,
1868 .model = 44,
1869 .stepping = 1,
1870 .features[FEAT_1_EDX] =
1871 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1872 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1873 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1874 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1875 CPUID_DE | CPUID_FP87,
1876 .features[FEAT_1_ECX] =
1877 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1878 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1879 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1880 .features[FEAT_8000_0001_EDX] =
1881 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1882 .features[FEAT_8000_0001_ECX] =
1883 CPUID_EXT3_LAHF_LM,
1884 .features[FEAT_7_0_EDX] =
1885 CPUID_7_0_EDX_SPEC_CTRL,
1886 .features[FEAT_6_EAX] =
1887 CPUID_6_EAX_ARAT,
1888 .xlevel = 0x80000008,
1889 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1890 },
1891 {
1892 .name = "SandyBridge",
1893 .level = 0xd,
1894 .vendor = CPUID_VENDOR_INTEL,
1895 .family = 6,
1896 .model = 42,
1897 .stepping = 1,
1898 .features[FEAT_1_EDX] =
1899 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1900 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1901 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1902 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1903 CPUID_DE | CPUID_FP87,
1904 .features[FEAT_1_ECX] =
1905 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1906 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1907 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1908 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1909 CPUID_EXT_SSE3,
1910 .features[FEAT_8000_0001_EDX] =
1911 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1912 CPUID_EXT2_SYSCALL,
1913 .features[FEAT_8000_0001_ECX] =
1914 CPUID_EXT3_LAHF_LM,
1915 .features[FEAT_XSAVE] =
1916 CPUID_XSAVE_XSAVEOPT,
1917 .features[FEAT_6_EAX] =
1918 CPUID_6_EAX_ARAT,
1919 .xlevel = 0x80000008,
1920 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1921 },
1922 {
1923 .name = "SandyBridge-IBRS",
1924 .level = 0xd,
1925 .vendor = CPUID_VENDOR_INTEL,
1926 .family = 6,
1927 .model = 42,
1928 .stepping = 1,
1929 .features[FEAT_1_EDX] =
1930 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1931 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1932 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1933 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1934 CPUID_DE | CPUID_FP87,
1935 .features[FEAT_1_ECX] =
1936 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1937 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1938 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1939 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1940 CPUID_EXT_SSE3,
1941 .features[FEAT_8000_0001_EDX] =
1942 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1943 CPUID_EXT2_SYSCALL,
1944 .features[FEAT_8000_0001_ECX] =
1945 CPUID_EXT3_LAHF_LM,
1946 .features[FEAT_7_0_EDX] =
1947 CPUID_7_0_EDX_SPEC_CTRL,
1948 .features[FEAT_XSAVE] =
1949 CPUID_XSAVE_XSAVEOPT,
1950 .features[FEAT_6_EAX] =
1951 CPUID_6_EAX_ARAT,
1952 .xlevel = 0x80000008,
1953 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1954 },
1955 {
1956 .name = "IvyBridge",
1957 .level = 0xd,
1958 .vendor = CPUID_VENDOR_INTEL,
1959 .family = 6,
1960 .model = 58,
1961 .stepping = 9,
1962 .features[FEAT_1_EDX] =
1963 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1964 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1965 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1966 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1967 CPUID_DE | CPUID_FP87,
1968 .features[FEAT_1_ECX] =
1969 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1970 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1971 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1972 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1973 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1974 .features[FEAT_7_0_EBX] =
1975 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1976 CPUID_7_0_EBX_ERMS,
1977 .features[FEAT_8000_0001_EDX] =
1978 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1979 CPUID_EXT2_SYSCALL,
1980 .features[FEAT_8000_0001_ECX] =
1981 CPUID_EXT3_LAHF_LM,
1982 .features[FEAT_XSAVE] =
1983 CPUID_XSAVE_XSAVEOPT,
1984 .features[FEAT_6_EAX] =
1985 CPUID_6_EAX_ARAT,
1986 .xlevel = 0x80000008,
1987 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1988 },
1989 {
1990 .name = "IvyBridge-IBRS",
1991 .level = 0xd,
1992 .vendor = CPUID_VENDOR_INTEL,
1993 .family = 6,
1994 .model = 58,
1995 .stepping = 9,
1996 .features[FEAT_1_EDX] =
1997 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1998 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1999 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2000 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2001 CPUID_DE | CPUID_FP87,
2002 .features[FEAT_1_ECX] =
2003 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2004 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2005 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2006 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2007 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2008 .features[FEAT_7_0_EBX] =
2009 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
2010 CPUID_7_0_EBX_ERMS,
2011 .features[FEAT_8000_0001_EDX] =
2012 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2013 CPUID_EXT2_SYSCALL,
2014 .features[FEAT_8000_0001_ECX] =
2015 CPUID_EXT3_LAHF_LM,
2016 .features[FEAT_7_0_EDX] =
2017 CPUID_7_0_EDX_SPEC_CTRL,
2018 .features[FEAT_XSAVE] =
2019 CPUID_XSAVE_XSAVEOPT,
2020 .features[FEAT_6_EAX] =
2021 CPUID_6_EAX_ARAT,
2022 .xlevel = 0x80000008,
2023 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
2024 },
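/* The -noTSX variants omit the HLE and RTM (TSX) bits so the model can be
 * used on hosts where TSX is unavailable or has been disabled, e.g. by a
 * microcode update.
 */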
2025 {
2026 .name = "Haswell-noTSX",
2027 .level = 0xd,
2028 .vendor = CPUID_VENDOR_INTEL,
2029 .family = 6,
2030 .model = 60,
2031 .stepping = 1,
2032 .features[FEAT_1_EDX] =
2033 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2034 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2035 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2036 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2037 CPUID_DE | CPUID_FP87,
2038 .features[FEAT_1_ECX] =
2039 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2040 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2041 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2042 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2043 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2044 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2045 .features[FEAT_8000_0001_EDX] =
2046 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2047 CPUID_EXT2_SYSCALL,
2048 .features[FEAT_8000_0001_ECX] =
2049 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2050 .features[FEAT_7_0_EBX] =
2051 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2052 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2053 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
2054 .features[FEAT_XSAVE] =
2055 CPUID_XSAVE_XSAVEOPT,
2056 .features[FEAT_6_EAX] =
2057 CPUID_6_EAX_ARAT,
2058 .xlevel = 0x80000008,
2059 .model_id = "Intel Core Processor (Haswell, no TSX)",
2060 },
2061 {
2062 .name = "Haswell-noTSX-IBRS",
2063 .level = 0xd,
2064 .vendor = CPUID_VENDOR_INTEL,
2065 .family = 6,
2066 .model = 60,
2067 .stepping = 1,
2068 .features[FEAT_1_EDX] =
2069 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2070 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2071 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2072 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2073 CPUID_DE | CPUID_FP87,
2074 .features[FEAT_1_ECX] =
2075 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2076 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2077 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2078 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2079 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2080 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2081 .features[FEAT_8000_0001_EDX] =
2082 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2083 CPUID_EXT2_SYSCALL,
2084 .features[FEAT_8000_0001_ECX] =
2085 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2086 .features[FEAT_7_0_EDX] =
2087 CPUID_7_0_EDX_SPEC_CTRL,
2088 .features[FEAT_7_0_EBX] =
2089 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2090 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2091 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
2092 .features[FEAT_XSAVE] =
2093 CPUID_XSAVE_XSAVEOPT,
2094 .features[FEAT_6_EAX] =
2095 CPUID_6_EAX_ARAT,
2096 .xlevel = 0x80000008,
2097 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
2098 },
2099 {
2100 .name = "Haswell",
2101 .level = 0xd,
2102 .vendor = CPUID_VENDOR_INTEL,
2103 .family = 6,
2104 .model = 60,
2105 .stepping = 4,
2106 .features[FEAT_1_EDX] =
2107 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2108 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2109 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2110 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2111 CPUID_DE | CPUID_FP87,
2112 .features[FEAT_1_ECX] =
2113 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2114 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2115 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2116 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2117 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2118 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2119 .features[FEAT_8000_0001_EDX] =
2120 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2121 CPUID_EXT2_SYSCALL,
2122 .features[FEAT_8000_0001_ECX] =
2123 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2124 .features[FEAT_7_0_EBX] =
2125 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2126 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2127 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2128 CPUID_7_0_EBX_RTM,
2129 .features[FEAT_XSAVE] =
2130 CPUID_XSAVE_XSAVEOPT,
2131 .features[FEAT_6_EAX] =
2132 CPUID_6_EAX_ARAT,
2133 .xlevel = 0x80000008,
2134 .model_id = "Intel Core Processor (Haswell)",
2135 },
2136 {
2137 .name = "Haswell-IBRS",
2138 .level = 0xd,
2139 .vendor = CPUID_VENDOR_INTEL,
2140 .family = 6,
2141 .model = 60,
2142 .stepping = 4,
2143 .features[FEAT_1_EDX] =
2144 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2145 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2146 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2147 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2148 CPUID_DE | CPUID_FP87,
2149 .features[FEAT_1_ECX] =
2150 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2151 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2152 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2153 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2154 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2155 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2156 .features[FEAT_8000_0001_EDX] =
2157 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2158 CPUID_EXT2_SYSCALL,
2159 .features[FEAT_8000_0001_ECX] =
2160 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2161 .features[FEAT_7_0_EDX] =
2162 CPUID_7_0_EDX_SPEC_CTRL,
2163 .features[FEAT_7_0_EBX] =
2164 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2165 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2166 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2167 CPUID_7_0_EBX_RTM,
2168 .features[FEAT_XSAVE] =
2169 CPUID_XSAVE_XSAVEOPT,
2170 .features[FEAT_6_EAX] =
2171 CPUID_6_EAX_ARAT,
2172 .xlevel = 0x80000008,
2173 .model_id = "Intel Core Processor (Haswell, IBRS)",
2174 },
2175 {
2176 .name = "Broadwell-noTSX",
2177 .level = 0xd,
2178 .vendor = CPUID_VENDOR_INTEL,
2179 .family = 6,
2180 .model = 61,
2181 .stepping = 2,
2182 .features[FEAT_1_EDX] =
2183 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2184 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2185 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2186 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2187 CPUID_DE | CPUID_FP87,
2188 .features[FEAT_1_ECX] =
2189 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2190 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2191 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2192 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2193 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2194 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2195 .features[FEAT_8000_0001_EDX] =
2196 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2197 CPUID_EXT2_SYSCALL,
2198 .features[FEAT_8000_0001_ECX] =
2199 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2200 .features[FEAT_7_0_EBX] =
2201 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2202 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2203 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2204 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2205 CPUID_7_0_EBX_SMAP,
2206 .features[FEAT_XSAVE] =
2207 CPUID_XSAVE_XSAVEOPT,
2208 .features[FEAT_6_EAX] =
2209 CPUID_6_EAX_ARAT,
2210 .xlevel = 0x80000008,
2211 .model_id = "Intel Core Processor (Broadwell, no TSX)",
2212 },
2213 {
2214 .name = "Broadwell-noTSX-IBRS",
2215 .level = 0xd,
2216 .vendor = CPUID_VENDOR_INTEL,
2217 .family = 6,
2218 .model = 61,
2219 .stepping = 2,
2220 .features[FEAT_1_EDX] =
2221 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2222 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2223 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2224 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2225 CPUID_DE | CPUID_FP87,
2226 .features[FEAT_1_ECX] =
2227 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2228 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2229 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2230 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2231 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2232 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2233 .features[FEAT_8000_0001_EDX] =
2234 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2235 CPUID_EXT2_SYSCALL,
2236 .features[FEAT_8000_0001_ECX] =
2237 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2238 .features[FEAT_7_0_EDX] =
2239 CPUID_7_0_EDX_SPEC_CTRL,
2240 .features[FEAT_7_0_EBX] =
2241 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2242 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2243 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2244 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2245 CPUID_7_0_EBX_SMAP,
2246 .features[FEAT_XSAVE] =
2247 CPUID_XSAVE_XSAVEOPT,
2248 .features[FEAT_6_EAX] =
2249 CPUID_6_EAX_ARAT,
2250 .xlevel = 0x80000008,
2251 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
2252 },
2253 {
2254 .name = "Broadwell",
2255 .level = 0xd,
2256 .vendor = CPUID_VENDOR_INTEL,
2257 .family = 6,
2258 .model = 61,
2259 .stepping = 2,
2260 .features[FEAT_1_EDX] =
2261 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2262 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2263 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2264 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2265 CPUID_DE | CPUID_FP87,
2266 .features[FEAT_1_ECX] =
2267 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2268 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2269 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2270 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2271 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2272 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2273 .features[FEAT_8000_0001_EDX] =
2274 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2275 CPUID_EXT2_SYSCALL,
2276 .features[FEAT_8000_0001_ECX] =
2277 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2278 .features[FEAT_7_0_EBX] =
2279 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2280 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2281 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2282 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2283 CPUID_7_0_EBX_SMAP,
2284 .features[FEAT_XSAVE] =
2285 CPUID_XSAVE_XSAVEOPT,
2286 .features[FEAT_6_EAX] =
2287 CPUID_6_EAX_ARAT,
2288 .xlevel = 0x80000008,
2289 .model_id = "Intel Core Processor (Broadwell)",
2290 },
2291 {
2292 .name = "Broadwell-IBRS",
2293 .level = 0xd,
2294 .vendor = CPUID_VENDOR_INTEL,
2295 .family = 6,
2296 .model = 61,
2297 .stepping = 2,
2298 .features[FEAT_1_EDX] =
2299 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2300 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2301 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2302 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2303 CPUID_DE | CPUID_FP87,
2304 .features[FEAT_1_ECX] =
2305 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2306 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2307 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2308 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2309 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2310 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2311 .features[FEAT_8000_0001_EDX] =
2312 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2313 CPUID_EXT2_SYSCALL,
2314 .features[FEAT_8000_0001_ECX] =
2315 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2316 .features[FEAT_7_0_EDX] =
2317 CPUID_7_0_EDX_SPEC_CTRL,
2318 .features[FEAT_7_0_EBX] =
2319 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2320 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2321 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2322 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2323 CPUID_7_0_EBX_SMAP,
2324 .features[FEAT_XSAVE] =
2325 CPUID_XSAVE_XSAVEOPT,
2326 .features[FEAT_6_EAX] =
2327 CPUID_6_EAX_ARAT,
2328 .xlevel = 0x80000008,
2329 .model_id = "Intel Core Processor (Broadwell, IBRS)",
2330 },
2331 {
2332 .name = "Skylake-Client",
2333 .level = 0xd,
2334 .vendor = CPUID_VENDOR_INTEL,
2335 .family = 6,
2336 .model = 94,
2337 .stepping = 3,
2338 .features[FEAT_1_EDX] =
2339 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2340 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2341 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2342 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2343 CPUID_DE | CPUID_FP87,
2344 .features[FEAT_1_ECX] =
2345 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2346 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2347 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2348 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2349 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2350 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2351 .features[FEAT_8000_0001_EDX] =
2352 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2353 CPUID_EXT2_SYSCALL,
2354 .features[FEAT_8000_0001_ECX] =
2355 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2356 .features[FEAT_7_0_EBX] =
2357 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2358 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2359 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2360 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2361 CPUID_7_0_EBX_SMAP,
2362 /* Missing: XSAVES (not supported by some Linux versions,
2363 * including v4.1 to v4.12).
2364 * KVM doesn't yet expose any XSAVES state save component,
2365 * and the only one defined in Skylake (processor tracing)
2366 * probably will block migration anyway.
2367 */
2368 .features[FEAT_XSAVE] =
2369 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2370 CPUID_XSAVE_XGETBV1,
2371 .features[FEAT_6_EAX] =
2372 CPUID_6_EAX_ARAT,
2373 .xlevel = 0x80000008,
2374 .model_id = "Intel Core Processor (Skylake)",
2375 },
2376 {
2377 .name = "Skylake-Client-IBRS",
2378 .level = 0xd,
2379 .vendor = CPUID_VENDOR_INTEL,
2380 .family = 6,
2381 .model = 94,
2382 .stepping = 3,
2383 .features[FEAT_1_EDX] =
2384 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2385 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2386 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2387 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2388 CPUID_DE | CPUID_FP87,
2389 .features[FEAT_1_ECX] =
2390 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2391 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2392 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2393 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2394 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2395 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2396 .features[FEAT_8000_0001_EDX] =
2397 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2398 CPUID_EXT2_SYSCALL,
2399 .features[FEAT_8000_0001_ECX] =
2400 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2401 .features[FEAT_7_0_EDX] =
2402 CPUID_7_0_EDX_SPEC_CTRL,
2403 .features[FEAT_7_0_EBX] =
2404 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2405 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2406 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2407 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2408 CPUID_7_0_EBX_SMAP,
2409 /* Missing: XSAVES (not supported by some Linux versions,
2410 * including v4.1 to v4.12).
2411 * KVM doesn't yet expose any XSAVES state save component,
2412 * and the only one defined in Skylake (processor tracing)
2413 * probably will block migration anyway.
2414 */
2415 .features[FEAT_XSAVE] =
2416 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2417 CPUID_XSAVE_XGETBV1,
2418 .features[FEAT_6_EAX] =
2419 CPUID_6_EAX_ARAT,
2420 .xlevel = 0x80000008,
2421 .model_id = "Intel Core Processor (Skylake, IBRS)",
2422 },
2423 {
2424 .name = "Skylake-Server",
2425 .level = 0xd,
2426 .vendor = CPUID_VENDOR_INTEL,
2427 .family = 6,
2428 .model = 85,
2429 .stepping = 4,
2430 .features[FEAT_1_EDX] =
2431 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2432 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2433 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2434 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2435 CPUID_DE | CPUID_FP87,
2436 .features[FEAT_1_ECX] =
2437 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2438 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2439 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2440 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2441 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2442 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2443 .features[FEAT_8000_0001_EDX] =
2444 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2445 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2446 .features[FEAT_8000_0001_ECX] =
2447 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2448 .features[FEAT_7_0_EBX] =
2449 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2450 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2451 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2452 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2453 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2454 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2455 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2456 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2457 .features[FEAT_7_0_ECX] =
2458 CPUID_7_0_ECX_PKU,
2459 /* Missing: XSAVES (not supported by some Linux versions,
2460 * including v4.1 to v4.12).
2461 * KVM doesn't yet expose any XSAVES state save component,
2462 * and the only one defined in Skylake (processor tracing)
2463 * probably will block migration anyway.
2464 */
2465 .features[FEAT_XSAVE] =
2466 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2467 CPUID_XSAVE_XGETBV1,
2468 .features[FEAT_6_EAX] =
2469 CPUID_6_EAX_ARAT,
2470 .xlevel = 0x80000008,
2471 .model_id = "Intel Xeon Processor (Skylake)",
2472 },
2473 {
2474 .name = "Skylake-Server-IBRS",
2475 .level = 0xd,
2476 .vendor = CPUID_VENDOR_INTEL,
2477 .family = 6,
2478 .model = 85,
2479 .stepping = 4,
2480 .features[FEAT_1_EDX] =
2481 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2482 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2483 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2484 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2485 CPUID_DE | CPUID_FP87,
2486 .features[FEAT_1_ECX] =
2487 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2488 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2489 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2490 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2491 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2492 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2493 .features[FEAT_8000_0001_EDX] =
2494 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2495 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2496 .features[FEAT_8000_0001_ECX] =
2497 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2498 .features[FEAT_7_0_EDX] =
2499 CPUID_7_0_EDX_SPEC_CTRL,
2500 .features[FEAT_7_0_EBX] =
2501 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2502 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2503 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2504 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2505 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2506 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2507 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2508 CPUID_7_0_EBX_AVX512VL,
2509 .features[FEAT_7_0_ECX] =
2510 CPUID_7_0_ECX_PKU,
2511 /* Missing: XSAVES (not supported by some Linux versions,
2512 * including v4.1 to v4.12).
2513 * KVM doesn't yet expose any XSAVES state save component,
2514 * and the only one defined in Skylake (processor tracing)
2515 * probably will block migration anyway.
2516 */
2517 .features[FEAT_XSAVE] =
2518 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2519 CPUID_XSAVE_XGETBV1,
2520 .features[FEAT_6_EAX] =
2521 CPUID_6_EAX_ARAT,
2522 .xlevel = 0x80000008,
2523 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2524 },
2525 {
2526 .name = "Cascadelake-Server",
2527 .level = 0xd,
2528 .vendor = CPUID_VENDOR_INTEL,
2529 .family = 6,
2530 .model = 85,
2531 .stepping = 6,
2532 .features[FEAT_1_EDX] =
2533 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2534 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2535 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2536 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2537 CPUID_DE | CPUID_FP87,
2538 .features[FEAT_1_ECX] =
2539 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2540 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2541 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2542 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2543 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2544 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2545 .features[FEAT_8000_0001_EDX] =
2546 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2547 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2548 .features[FEAT_8000_0001_ECX] =
2549 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2550 .features[FEAT_7_0_EBX] =
2551 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2552 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2553 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2554 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2555 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2556 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2557 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2558 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2559 .features[FEAT_7_0_ECX] =
2560 CPUID_7_0_ECX_PKU |
2561 CPUID_7_0_ECX_AVX512VNNI,
2562 .features[FEAT_7_0_EDX] =
2563 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2564 /* Missing: XSAVES (not supported by some Linux versions,
2565 * including v4.1 to v4.12).
2566 * KVM doesn't yet expose any XSAVES state save component,
2567 * and the only one defined in Skylake (processor tracing)
2568 * probably will block migration anyway.
2569 */
2570 .features[FEAT_XSAVE] =
2571 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2572 CPUID_XSAVE_XGETBV1,
2573 .features[FEAT_6_EAX] =
2574 CPUID_6_EAX_ARAT,
2575 .xlevel = 0x80000008,
2576 .model_id = "Intel Xeon Processor (Cascadelake)",
2577 },
2578 {
2579 .name = "Icelake-Client",
2580 .level = 0xd,
2581 .vendor = CPUID_VENDOR_INTEL,
2582 .family = 6,
2583 .model = 126,
2584 .stepping = 0,
2585 .features[FEAT_1_EDX] =
2586 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2587 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2588 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2589 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2590 CPUID_DE | CPUID_FP87,
2591 .features[FEAT_1_ECX] =
2592 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2593 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2594 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2595 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2596 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2597 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2598 .features[FEAT_8000_0001_EDX] =
2599 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2600 CPUID_EXT2_SYSCALL,
2601 .features[FEAT_8000_0001_ECX] =
2602 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2603 .features[FEAT_8000_0008_EBX] =
2604 CPUID_8000_0008_EBX_WBNOINVD,
2605 .features[FEAT_7_0_EBX] =
2606 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2607 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2608 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2609 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2610 CPUID_7_0_EBX_SMAP,
2611 .features[FEAT_7_0_ECX] =
2612 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2613 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2614 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2615 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2616 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2617 .features[FEAT_7_0_EDX] =
2618 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2619 /* Missing: XSAVES (not supported by some Linux versions,
2620 * including v4.1 to v4.12).
2621 * KVM doesn't yet expose any XSAVES state save component,
2622 * and the only one defined in Skylake (processor tracing)
2623 * probably will block migration anyway.
2624 */
2625 .features[FEAT_XSAVE] =
2626 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2627 CPUID_XSAVE_XGETBV1,
2628 .features[FEAT_6_EAX] =
2629 CPUID_6_EAX_ARAT,
2630 .xlevel = 0x80000008,
2631 .model_id = "Intel Core Processor (Icelake)",
2632 },
2633 {
2634 .name = "Icelake-Server",
2635 .level = 0xd,
2636 .vendor = CPUID_VENDOR_INTEL,
2637 .family = 6,
2638 .model = 134,
2639 .stepping = 0,
2640 .features[FEAT_1_EDX] =
2641 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2642 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2643 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2644 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2645 CPUID_DE | CPUID_FP87,
2646 .features[FEAT_1_ECX] =
2647 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2648 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2649 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2650 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2651 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2652 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2653 .features[FEAT_8000_0001_EDX] =
2654 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2655 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2656 .features[FEAT_8000_0001_ECX] =
2657 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2658 .features[FEAT_8000_0008_EBX] =
2659 CPUID_8000_0008_EBX_WBNOINVD,
2660 .features[FEAT_7_0_EBX] =
2661 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2662 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2663 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2664 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2665 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2666 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2667 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2668 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2669 .features[FEAT_7_0_ECX] =
2670 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2671 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2672 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2673 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2674 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
2675 .features[FEAT_7_0_EDX] =
2676 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2677 /* Missing: XSAVES (not supported by some Linux versions,
2678 * including v4.1 to v4.12).
2679 * KVM doesn't yet expose any XSAVES state save component,
2680 * and the only one defined in Skylake (processor tracing)
2681 * probably will block migration anyway.
2682 */
2683 .features[FEAT_XSAVE] =
2684 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2685 CPUID_XSAVE_XGETBV1,
2686 .features[FEAT_6_EAX] =
2687 CPUID_6_EAX_ARAT,
2688 .xlevel = 0x80000008,
2689 .model_id = "Intel Xeon Processor (Icelake)",
2690 },
2691 {
2692 .name = "KnightsMill",
2693 .level = 0xd,
2694 .vendor = CPUID_VENDOR_INTEL,
2695 .family = 6,
2696 .model = 133,
2697 .stepping = 0,
2698 .features[FEAT_1_EDX] =
2699 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2700 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2701 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2702 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2703 CPUID_PSE | CPUID_DE | CPUID_FP87,
2704 .features[FEAT_1_ECX] =
2705 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2706 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2707 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2708 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2709 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2710 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2711 .features[FEAT_8000_0001_EDX] =
2712 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2713 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2714 .features[FEAT_8000_0001_ECX] =
2715 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2716 .features[FEAT_7_0_EBX] =
2717 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2718 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2719 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2720 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2721 CPUID_7_0_EBX_AVX512ER,
2722 .features[FEAT_7_0_ECX] =
2723 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2724 .features[FEAT_7_0_EDX] =
2725 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2726 .features[FEAT_XSAVE] =
2727 CPUID_XSAVE_XSAVEOPT,
2728 .features[FEAT_6_EAX] =
2729 CPUID_6_EAX_ARAT,
2730 .xlevel = 0x80000008,
2731 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2732 },
2733 {
2734 .name = "Opteron_G1",
2735 .level = 5,
2736 .vendor = CPUID_VENDOR_AMD,
2737 .family = 15,
2738 .model = 6,
2739 .stepping = 1,
2740 .features[FEAT_1_EDX] =
2741 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2742 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2743 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2744 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2745 CPUID_DE | CPUID_FP87,
2746 .features[FEAT_1_ECX] =
2747 CPUID_EXT_SSE3,
2748 .features[FEAT_8000_0001_EDX] =
2749 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2750 .xlevel = 0x80000008,
2751 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2752 },
2753 {
2754 .name = "Opteron_G2",
2755 .level = 5,
2756 .vendor = CPUID_VENDOR_AMD,
2757 .family = 15,
2758 .model = 6,
2759 .stepping = 1,
2760 .features[FEAT_1_EDX] =
2761 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2762 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2763 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2764 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2765 CPUID_DE | CPUID_FP87,
2766 .features[FEAT_1_ECX] =
2767 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2768 .features[FEAT_8000_0001_EDX] =
2769 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2770 .features[FEAT_8000_0001_ECX] =
2771 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2772 .xlevel = 0x80000008,
2773 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2774 },
2775 {
2776 .name = "Opteron_G3",
2777 .level = 5,
2778 .vendor = CPUID_VENDOR_AMD,
2779 .family = 16,
2780 .model = 2,
2781 .stepping = 3,
2782 .features[FEAT_1_EDX] =
2783 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2784 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2785 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2786 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2787 CPUID_DE | CPUID_FP87,
2788 .features[FEAT_1_ECX] =
2789 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2790 CPUID_EXT_SSE3,
2791 .features[FEAT_8000_0001_EDX] =
2792 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
2793 CPUID_EXT2_RDTSCP,
2794 .features[FEAT_8000_0001_ECX] =
2795 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2796 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2797 .xlevel = 0x80000008,
2798 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2799 },
2800 {
2801 .name = "Opteron_G4",
2802 .level = 0xd,
2803 .vendor = CPUID_VENDOR_AMD,
2804 .family = 21,
2805 .model = 1,
2806 .stepping = 2,
2807 .features[FEAT_1_EDX] =
2808 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2809 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2810 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2811 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2812 CPUID_DE | CPUID_FP87,
2813 .features[FEAT_1_ECX] =
2814 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2815 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2816 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2817 CPUID_EXT_SSE3,
2818 .features[FEAT_8000_0001_EDX] =
2819 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2820 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2821 .features[FEAT_8000_0001_ECX] =
2822 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2823 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2824 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2825 CPUID_EXT3_LAHF_LM,
2826 .features[FEAT_SVM] =
2827 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2828 /* no xsaveopt! */
2829 .xlevel = 0x8000001A,
2830 .model_id = "AMD Opteron 62xx class CPU",
2831 },
2832 {
2833 .name = "Opteron_G5",
2834 .level = 0xd,
2835 .vendor = CPUID_VENDOR_AMD,
2836 .family = 21,
2837 .model = 2,
2838 .stepping = 0,
2839 .features[FEAT_1_EDX] =
2840 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2841 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2842 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2843 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2844 CPUID_DE | CPUID_FP87,
2845 .features[FEAT_1_ECX] =
2846 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2847 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2848 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2849 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2850 .features[FEAT_8000_0001_EDX] =
2851 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2852 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2853 .features[FEAT_8000_0001_ECX] =
2854 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2855 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2856 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2857 CPUID_EXT3_LAHF_LM,
2858 .features[FEAT_SVM] =
2859 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2860 /* no xsaveopt! */
2861 .xlevel = 0x8000001A,
2862 .model_id = "AMD Opteron 63xx class CPU",
2863 },
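/* The EPYC-based models below also provide a fixed cache topology through
 * .cache_info (epyc_cache_info, defined earlier in this file) instead of
 * relying only on the built-in legacy cache defaults.
 */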
2864 {
2865 .name = "EPYC",
2866 .level = 0xd,
2867 .vendor = CPUID_VENDOR_AMD,
2868 .family = 23,
2869 .model = 1,
2870 .stepping = 2,
2871 .features[FEAT_1_EDX] =
2872 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2873 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2874 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2875 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2876 CPUID_VME | CPUID_FP87,
2877 .features[FEAT_1_ECX] =
2878 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2879 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2880 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2881 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2882 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2883 .features[FEAT_8000_0001_EDX] =
2884 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2885 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2886 CPUID_EXT2_SYSCALL,
2887 .features[FEAT_8000_0001_ECX] =
2888 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2889 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2890 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2891 CPUID_EXT3_TOPOEXT,
2892 .features[FEAT_7_0_EBX] =
2893 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2894 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2895 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2896 CPUID_7_0_EBX_SHA_NI,
2897 /* Missing: XSAVES (not supported by some Linux versions,
2898 * including v4.1 to v4.12).
2899 * KVM doesn't yet expose any XSAVES state save component.
2900 */
2901 .features[FEAT_XSAVE] =
2902 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2903 CPUID_XSAVE_XGETBV1,
2904 .features[FEAT_6_EAX] =
2905 CPUID_6_EAX_ARAT,
2906 .features[FEAT_SVM] =
2907 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2908 .xlevel = 0x8000001E,
2909 .model_id = "AMD EPYC Processor",
2910 .cache_info = &epyc_cache_info,
2911 },
2912 {
2913 .name = "EPYC-IBPB",
2914 .level = 0xd,
2915 .vendor = CPUID_VENDOR_AMD,
2916 .family = 23,
2917 .model = 1,
2918 .stepping = 2,
2919 .features[FEAT_1_EDX] =
2920 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2921 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2922 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2923 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2924 CPUID_VME | CPUID_FP87,
2925 .features[FEAT_1_ECX] =
2926 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2927 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2928 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2929 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2930 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2931 .features[FEAT_8000_0001_EDX] =
2932 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2933 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2934 CPUID_EXT2_SYSCALL,
2935 .features[FEAT_8000_0001_ECX] =
2936 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2937 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2938 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2939 CPUID_EXT3_TOPOEXT,
2940 .features[FEAT_8000_0008_EBX] =
2941 CPUID_8000_0008_EBX_IBPB,
2942 .features[FEAT_7_0_EBX] =
2943 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2944 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2945 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2946 CPUID_7_0_EBX_SHA_NI,
2947 /* Missing: XSAVES (not supported by some Linux versions,
2948 * including v4.1 to v4.12).
2949 * KVM doesn't yet expose any XSAVES state save component.
2950 */
2951 .features[FEAT_XSAVE] =
2952 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2953 CPUID_XSAVE_XGETBV1,
2954 .features[FEAT_6_EAX] =
2955 CPUID_6_EAX_ARAT,
2956 .features[FEAT_SVM] =
2957 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2958 .xlevel = 0x8000001E,
2959 .model_id = "AMD EPYC Processor (with IBPB)",
2960 .cache_info = &epyc_cache_info,
2961 },
2962 {
2963 .name = "Dhyana",
2964 .level = 0xd,
2965 .vendor = CPUID_VENDOR_HYGON,
2966 .family = 24,
2967 .model = 0,
2968 .stepping = 1,
2969 .features[FEAT_1_EDX] =
2970 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2971 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2972 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2973 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2974 CPUID_VME | CPUID_FP87,
2975 .features[FEAT_1_ECX] =
2976 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2977 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
2978 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2979 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2980 CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
2981 .features[FEAT_8000_0001_EDX] =
2982 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2983 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2984 CPUID_EXT2_SYSCALL,
2985 .features[FEAT_8000_0001_ECX] =
2986 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2987 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2988 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2989 CPUID_EXT3_TOPOEXT,
2990 .features[FEAT_8000_0008_EBX] =
2991 CPUID_8000_0008_EBX_IBPB,
2992 .features[FEAT_7_0_EBX] =
2993 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2994 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2995 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
2996 /*
2997 * Missing: XSAVES (not supported by some Linux versions,
2998 * including v4.1 to v4.12).
2999 * KVM doesn't yet expose any XSAVES state save component.
3000 */
3001 .features[FEAT_XSAVE] =
3002 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3003 CPUID_XSAVE_XGETBV1,
3004 .features[FEAT_6_EAX] =
3005 CPUID_6_EAX_ARAT,
3006 .features[FEAT_SVM] =
3007 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3008 .xlevel = 0x8000001E,
3009 .model_id = "Hygon Dhyana Processor",
3010 .cache_info = &epyc_cache_info,
3011 },
3012 };
3013
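/* Simple property-name/value pair, used below for tables of default
 * property overrides (each table is terminated by a { NULL, NULL } entry).
 */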
3014 typedef struct PropValue {
3015 const char *prop, *value;
3016 } PropValue;
3017
3018 /* KVM-specific features that are automatically added/removed
3019 * from all CPU models when KVM is enabled.
3020 */
3021 static PropValue kvm_default_props[] = {
3022 { "kvmclock", "on" },
3023 { "kvm-nopiodelay", "on" },
3024 { "kvm-asyncpf", "on" },
3025 { "kvm-steal-time", "on" },
3026 { "kvm-pv-eoi", "on" },
3027 { "kvmclock-stable-bit", "on" },
3028 { "x2apic", "on" },
3029 { "acpi", "off" },
3030 { "monitor", "off" },
3031 { "svm", "off" },
3032 { NULL, NULL },
3033 };
3034
3035 /* TCG-specific defaults that override all CPU models when using TCG
3036 */
3037 static PropValue tcg_default_props[] = {
3038 { "vme", "off" },
3039 { NULL, NULL },
3040 };
3041
3042
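/* Change the default value of a property in kvm_default_props.  The
 * property must already be present in the table; otherwise the assertion
 * at the end of the function fails.
 */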
3043 void x86_cpu_change_kvm_default(const char *prop, const char *value)
3044 {
3045 PropValue *pv;
3046 for (pv = kvm_default_props; pv->prop; pv++) {
3047 if (!strcmp(pv->prop, prop)) {
3048 pv->value = value;
3049 break;
3050 }
3051 }
3052
3053 /* It is valid to call this function only for properties that
3054 * are already present in the kvm_default_props table.
3055 */
3056 assert(pv->prop);
3057 }
3058
3059 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3060 bool migratable_only);
3061
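/* Return true if KVM reports support for Local Machine Check Exceptions
 * (MCG_LMCE_P in the supported MCE capabilities).  Always false when KVM
 * support is not built in.
 */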
3062 static bool lmce_supported(void)
3063 {
3064 uint64_t mce_cap = 0;
3065
3066 #ifdef CONFIG_KVM
3067 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
3068 return false;
3069 }
3070 #endif
3071
3072 return !!(mce_cap & MCG_LMCE_P);
3073 }
3074
3075 #define CPUID_MODEL_ID_SZ 48
3076
3077 /**
3078 * cpu_x86_fill_model_id:
3079 * Get CPUID model ID string from host CPU.
3080 *
3081 * @str should have at least CPUID_MODEL_ID_SZ bytes
3082 *
3083 * The function does NOT add a null terminator to the string
3084 * automatically.
3085 */
3086 static int cpu_x86_fill_model_id(char *str)
3087 {
3088 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
3089 int i;
3090
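/* CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the 48-byte
 * brand string in EAX/EBX/ECX/EDX. */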
3091 for (i = 0; i < 3; i++) {
3092 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
3093 memcpy(str + i * 16 + 0, &eax, 4);
3094 memcpy(str + i * 16 + 4, &ebx, 4);
3095 memcpy(str + i * 16 + 8, &ecx, 4);
3096 memcpy(str + i * 16 + 12, &edx, 4);
3097 }
3098 return 0;
3099 }
3100
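/* Properties of the "max" CPU model: "migratable" limits the enabled
 * features to ones that can be live-migrated, and "host-cache-info" passes
 * the host's cache topology through to the guest.
 */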
3101 static Property max_x86_cpu_properties[] = {
3102 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
3103 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
3104 DEFINE_PROP_END_OF_LIST()
3105 };
3106
3107 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
3108 {
3109 DeviceClass *dc = DEVICE_CLASS(oc);
3110 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3111
3112 xcc->ordering = 9;
3113
3114 xcc->model_description =
3115 "Enables all features supported by the accelerator in the current host";
3116
3117 dc->props = max_x86_cpu_properties;
3118 }
3119
3120 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
3121
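/* Instance init for the "max" CPU model.  When the accelerator uses host
 * CPUID (KVM or HVF), the vendor, family/model/stepping and model ID are
 * taken from the host; under TCG a fixed identity is used instead.
 */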
3122 static void max_x86_cpu_initfn(Object *obj)
3123 {
3124 X86CPU *cpu = X86_CPU(obj);
3125 CPUX86State *env = &cpu->env;
3126 KVMState *s = kvm_state;
3127
3128 /* We can't fill the features array here because we don't know yet if
3129 * "migratable" is true or false.
3130 */
3131 cpu->max_features = true;
3132
3133 if (accel_uses_host_cpuid()) {
3134 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
3135 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
3136 int family, model, stepping;
3137 X86CPUDefinition host_cpudef = { };
3138 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
3139
3140 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
3141 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
3142
3143 host_vendor_fms(vendor, &family, &model, &stepping);
3144
3145 cpu_x86_fill_model_id(model_id);
3146
3147 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
3148 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
3149 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
3150 object_property_set_int(OBJECT(cpu), stepping, "stepping",
3151 &error_abort);
3152 object_property_set_str(OBJECT(cpu), model_id, "model-id",
3153 &error_abort);
3154
3155 if (kvm_enabled()) {
3156 env->cpuid_min_level =
3157 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
3158 env->cpuid_min_xlevel =
3159 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
3160 env->cpuid_min_xlevel2 =
3161 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
3162 } else {
3163 env->cpuid_min_level =
3164 hvf_get_supported_cpuid(0x0, 0, R_EAX);
3165 env->cpuid_min_xlevel =
3166 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
3167 env->cpuid_min_xlevel2 =
3168 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
3169 }
3170
3171 if (lmce_supported()) {
3172 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
3173 }
3174 } else {
3175 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
3176 "vendor", &error_abort);
3177 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
3178 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
3179 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
3180 object_property_set_str(OBJECT(cpu),
3181 "QEMU TCG CPU version " QEMU_HW_VERSION,
3182 "model-id", &error_abort);
3183 }
3184
3185 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
3186 }
3187
3188 static const TypeInfo max_x86_cpu_type_info = {
3189 .name = X86_CPU_TYPE_NAME("max"),
3190 .parent = TYPE_X86_CPU,
3191 .instance_init = max_x86_cpu_initfn,
3192 .class_init = max_x86_cpu_class_init,
3193 };
3194
3195 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
3196 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
3197 {
3198 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3199
3200 xcc->host_cpuid_required = true;
3201 xcc->ordering = 8;
3202
3203 #if defined(CONFIG_KVM)
3204 xcc->model_description =
3205 "KVM processor with all supported host features ";
3206 #elif defined(CONFIG_HVF)
3207 xcc->model_description =
3208 "HVF processor with all supported host features ";
3209 #endif
3210 }
3211
3212 static const TypeInfo host_x86_cpu_type_info = {
3213 .name = X86_CPU_TYPE_NAME("host"),
3214 .parent = X86_CPU_TYPE_NAME("max"),
3215 .class_init = host_x86_cpu_class_init,
3216 };
3217
3218 #endif
3219
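/*
 * Return a human-readable location of a feature word, e.g. "CPUID.07H:EBX"
 * for a CPUID-based word or "MSR(10AH)" for an MSR-based one.
 * The caller must g_free() the returned string.
 */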
3220 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
3221 {
3222 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
3223
3224 switch (f->type) {
3225 case CPUID_FEATURE_WORD:
3226 {
3227 const char *reg = get_register_name_32(f->cpuid.reg);
3228 assert(reg);
3229 return g_strdup_printf("CPUID.%02XH:%s",
3230 f->cpuid.eax, reg);
3231 }
3232 case MSR_FEATURE_WORD:
3233 return g_strdup_printf("MSR(%02XH)",
3234 f->msr.index);
3235 }
3236
3237 return NULL;
3238 }
3239
3240 static void report_unavailable_features(FeatureWord w, uint32_t mask)
3241 {
3242 FeatureWordInfo *f = &feature_word_info[w];
3243 int i;
3244 char *feat_word_str;
3245
3246 for (i = 0; i < 32; ++i) {
3247 if ((1UL << i) & mask) {
3248 feat_word_str = feature_word_description(f, i);
3249 warn_report("%s doesn't support requested feature: %s%s%s [bit %d]",
3250 accel_uses_host_cpuid() ? "host" : "TCG",
3251 feat_word_str,
3252 f->feat_names[i] ? "." : "",
3253 f->feat_names[i] ? f->feat_names[i] : "", i);
3254 g_free(feat_word_str);
3255 }
3256 }
3257 }
3258
3259 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
3260 const char *name, void *opaque,
3261 Error **errp)
3262 {
3263 X86CPU *cpu = X86_CPU(obj);
3264 CPUX86State *env = &cpu->env;
3265 int64_t value;
3266
3267 value = (env->cpuid_version >> 8) & 0xf;
3268 if (value == 0xf) {
3269 value += (env->cpuid_version >> 20) & 0xff;
3270 }
3271 visit_type_int(v, name, &value, errp);
3272 }
3273
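/*
 * Family is stored as the base family in CPUID[1].EAX bits 11:8 plus the
 * extended family in bits 27:20 once the base field saturates at 0xf.
 * For example, setting family=23 (0x17) stores base 0xf and extended 0x8.
 */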
3274 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
3275 const char *name, void *opaque,
3276 Error **errp)
3277 {
3278 X86CPU *cpu = X86_CPU(obj);
3279 CPUX86State *env = &cpu->env;
3280 const int64_t min = 0;
3281 const int64_t max = 0xff + 0xf;
3282 Error *local_err = NULL;
3283 int64_t value;
3284
3285 visit_type_int(v, name, &value, &local_err);
3286 if (local_err) {
3287 error_propagate(errp, local_err);
3288 return;
3289 }
3290 if (value < min || value > max) {
3291 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3292 name ? name : "null", value, min, max);
3293 return;
3294 }
3295
3296 env->cpuid_version &= ~0xff00f00;
3297 if (value > 0x0f) {
3298 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
3299 } else {
3300 env->cpuid_version |= value << 8;
3301 }
3302 }
3303
3304 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
3305 const char *name, void *opaque,
3306 Error **errp)
3307 {
3308 X86CPU *cpu = X86_CPU(obj);
3309 CPUX86State *env = &cpu->env;
3310 int64_t value;
3311
3312 value = (env->cpuid_version >> 4) & 0xf;
3313 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
3314 visit_type_int(v, name, &value, errp);
3315 }
3316
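/*
 * Model is stored with its low nibble in CPUID[1].EAX bits 7:4 and its high
 * nibble as the extended model in bits 19:16. For example, model=94 (0x5E)
 * stores 0xE in bits 7:4 and 0x5 in bits 19:16.
 */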
3317 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
3318 const char *name, void *opaque,
3319 Error **errp)
3320 {
3321 X86CPU *cpu = X86_CPU(obj);
3322 CPUX86State *env = &cpu->env;
3323 const int64_t min = 0;
3324 const int64_t max = 0xff;
3325 Error *local_err = NULL;
3326 int64_t value;
3327
3328 visit_type_int(v, name, &value, &local_err);
3329 if (local_err) {
3330 error_propagate(errp, local_err);
3331 return;
3332 }
3333 if (value < min || value > max) {
3334 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3335 name ? name : "null", value, min, max);
3336 return;
3337 }
3338
3339 env->cpuid_version &= ~0xf00f0;
3340 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
3341 }
3342
3343 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
3344 const char *name, void *opaque,
3345 Error **errp)
3346 {
3347 X86CPU *cpu = X86_CPU(obj);
3348 CPUX86State *env = &cpu->env;
3349 int64_t value;
3350
3351 value = env->cpuid_version & 0xf;
3352 visit_type_int(v, name, &value, errp);
3353 }
3354
3355 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
3356 const char *name, void *opaque,
3357 Error **errp)
3358 {
3359 X86CPU *cpu = X86_CPU(obj);
3360 CPUX86State *env = &cpu->env;
3361 const int64_t min = 0;
3362 const int64_t max = 0xf;
3363 Error *local_err = NULL;
3364 int64_t value;
3365
3366 visit_type_int(v, name, &value, &local_err);
3367 if (local_err) {
3368 error_propagate(errp, local_err);
3369 return;
3370 }
3371 if (value < min || value > max) {
3372 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3373 name ? name : "null", value, min, max);
3374 return;
3375 }
3376
3377 env->cpuid_version &= ~0xf;
3378 env->cpuid_version |= value & 0xf;
3379 }
3380
3381 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
3382 {
3383 X86CPU *cpu = X86_CPU(obj);
3384 CPUX86State *env = &cpu->env;
3385 char *value;
3386
3387 value = g_malloc(CPUID_VENDOR_SZ + 1);
3388 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
3389 env->cpuid_vendor3);
3390 return value;
3391 }
3392
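/*
 * The 12-character vendor string is packed into three little-endian dwords,
 * e.g. "GenuineIntel" yields cpuid_vendor1 = 0x756e6547 ("Genu").
 */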
3393 static void x86_cpuid_set_vendor(Object *obj, const char *value,
3394 Error **errp)
3395 {
3396 X86CPU *cpu = X86_CPU(obj);
3397 CPUX86State *env = &cpu->env;
3398 int i;
3399
3400 if (strlen(value) != CPUID_VENDOR_SZ) {
3401 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
3402 return;
3403 }
3404
3405 env->cpuid_vendor1 = 0;
3406 env->cpuid_vendor2 = 0;
3407 env->cpuid_vendor3 = 0;
3408 for (i = 0; i < 4; i++) {
3409 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3410 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3411 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3412 }
3413 }
3414
3415 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3416 {
3417 X86CPU *cpu = X86_CPU(obj);
3418 CPUX86State *env = &cpu->env;
3419 char *value;
3420 int i;
3421
3422 value = g_malloc(48 + 1);
3423 for (i = 0; i < 48; i++) {
3424 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3425 }
3426 value[48] = '\0';
3427 return value;
3428 }
3429
3430 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3431 Error **errp)
3432 {
3433 X86CPU *cpu = X86_CPU(obj);
3434 CPUX86State *env = &cpu->env;
3435 int c, len, i;
3436
3437 if (model_id == NULL) {
3438 model_id = "";
3439 }
3440 len = strlen(model_id);
3441 memset(env->cpuid_model, 0, 48);
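/* Pack the string into 12 little-endian dwords; CPUID leaves
 * 0x80000002..0x80000004 return them four dwords at a time. */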
3442 for (i = 0; i < 48; i++) {
3443 if (i >= len) {
3444 c = '\0';
3445 } else {
3446 c = (uint8_t)model_id[i];
3447 }
3448 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3449 }
3450 }
3451
3452 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3453 void *opaque, Error **errp)
3454 {
3455 X86CPU *cpu = X86_CPU(obj);
3456 int64_t value;
3457
3458 value = cpu->env.tsc_khz * 1000;
3459 visit_type_int(v, name, &value, errp);
3460 }
3461
3462 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3463 void *opaque, Error **errp)
3464 {
3465 X86CPU *cpu = X86_CPU(obj);
3466 const int64_t min = 0;
3467 const int64_t max = INT64_MAX;
3468 Error *local_err = NULL;
3469 int64_t value;
3470
3471 visit_type_int(v, name, &value, &local_err);
3472 if (local_err) {
3473 error_propagate(errp, local_err);
3474 return;
3475 }
3476 if (value < min || value > max) {
3477 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3478 name ? name : "null", value, min, max);
3479 return;
3480 }
3481
3482 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
3483 }
3484
3485 /* Generic getter for "feature-words" and "filtered-features" properties */
3486 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
3487 const char *name, void *opaque,
3488 Error **errp)
3489 {
3490 uint32_t *array = (uint32_t *)opaque;
3491 FeatureWord w;
3492 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
3493 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
3494 X86CPUFeatureWordInfoList *list = NULL;
3495
3496 for (w = 0; w < FEATURE_WORDS; w++) {
3497 FeatureWordInfo *wi = &feature_word_info[w];
3498 /*
3499 * We didn't have MSR features when "feature-words" was
3500 * introduced, so skip entries of any other type.
3501 */
3502 if (wi->type != CPUID_FEATURE_WORD) {
3503 continue;
3504 }
3505 X86CPUFeatureWordInfo *qwi = &word_infos[w];
3506 qwi->cpuid_input_eax = wi->cpuid.eax;
3507 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
3508 qwi->cpuid_input_ecx = wi->cpuid.ecx;
3509 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
3510 qwi->features = array[w];
3511
3512 /* List will be in reverse order, but order shouldn't matter */
3513 list_entries[w].next = list;
3514 list_entries[w].value = &word_infos[w];
3515 list = &list_entries[w];
3516 }
3517
3518 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
3519 }
3520
3521 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3522 void *opaque, Error **errp)
3523 {
3524 X86CPU *cpu = X86_CPU(obj);
3525 int64_t value = cpu->hyperv_spinlock_attempts;
3526
3527 visit_type_int(v, name, &value, errp);
3528 }
3529
3530 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3531 void *opaque, Error **errp)
3532 {
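/* 0xFFF appears to be the smallest retry count the Hyper-V interface
 * accepts, so smaller values are rejected below. */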
3533 const int64_t min = 0xFFF;
3534 const int64_t max = UINT_MAX;
3535 X86CPU *cpu = X86_CPU(obj);
3536 Error *err = NULL;
3537 int64_t value;
3538
3539 visit_type_int(v, name, &value, &err);
3540 if (err) {
3541 error_propagate(errp, err);
3542 return;
3543 }
3544
3545 if (value < min || value > max) {
3546 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
3547 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
3548 object_get_typename(obj), name ? name : "null",
3549 value, min, max);
3550 return;
3551 }
3552 cpu->hyperv_spinlock_attempts = value;
3553 }
3554
3555 static const PropertyInfo qdev_prop_spinlocks = {
3556 .name = "int",
3557 .get = x86_get_hv_spinlocks,
3558 .set = x86_set_hv_spinlocks,
3559 };
3560
3561 /* Convert all '_' in a feature string option name to '-', to make the feature
3562 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
3563 */
3564 static inline void feat2prop(char *s)
3565 {
3566 while ((s = strchr(s, '_'))) {
3567 *s = '-';
3568 }
3569 }
3570
3571 /* Return the feature property name for a feature flag bit */
3572 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
3573 {
3574 /* XSAVE components are automatically enabled by other features,
3575 * so return the original feature name instead
3576 */
3577 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
3578 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
3579
3580 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
3581 x86_ext_save_areas[comp].bits) {
3582 w = x86_ext_save_areas[comp].feature;
3583 bitnr = ctz32(x86_ext_save_areas[comp].bits);
3584 }
3585 }
3586
3587 assert(bitnr < 32);
3588 assert(w < FEATURE_WORDS);
3589 return feature_word_info[w].feat_names[bitnr];
3590 }
3591
3592 /* Compatibility hack to maintain the legacy +feat/-feat semantics,
3593 * where +feat/-feat overrides any feature set by
3594 * feat=on or plain feat, even if the latter is parsed after +feat/-feat
3595 * (i.e. "-x2apic,x2apic=on" will still result in x2apic being disabled)
3596 */
3597 static GList *plus_features, *minus_features;
3598
3599 static gint compare_string(gconstpointer a, gconstpointer b)
3600 {
3601 return g_strcmp0(a, b);
3602 }
3603
3604 /* Parse "+feature,-feature,feature=foo" CPU feature string
3605 */
3606 static void x86_cpu_parse_featurestr(const char *typename, char *features,
3607 Error **errp)
3608 {
3609 char *featurestr; /* Single "key=value" string being parsed */
3610 static bool cpu_globals_initialized;
3611 bool ambiguous = false;
3612
3613 if (cpu_globals_initialized) {
3614 return;
3615 }
3616 cpu_globals_initialized = true;
3617
3618 if (!features) {
3619 return;
3620 }
3621
3622 for (featurestr = strtok(features, ",");
3623 featurestr;
3624 featurestr = strtok(NULL, ",")) {
3625 const char *name;
3626 const char *val = NULL;
3627 char *eq = NULL;
3628 char num[32];
3629 GlobalProperty *prop;
3630
3631 /* Compatibility syntax: */
3632 if (featurestr[0] == '+') {
3633 plus_features = g_list_append(plus_features,
3634 g_strdup(featurestr + 1));
3635 continue;
3636 } else if (featurestr[0] == '-') {
3637 minus_features = g_list_append(minus_features,
3638 g_strdup(featurestr + 1));
3639 continue;
3640 }
3641
3642 eq = strchr(featurestr, '=');
3643 if (eq) {
3644 *eq++ = 0;
3645 val = eq;
3646 } else {
3647 val = "on";
3648 }
3649
3650 feat2prop(featurestr);
3651 name = featurestr;
3652
3653 if (g_list_find_custom(plus_features, name, compare_string)) {
3654 warn_report("Ambiguous CPU model string. "
3655 "Don't mix both \"+%s\" and \"%s=%s\"",
3656 name, name, val);
3657 ambiguous = true;
3658 }
3659 if (g_list_find_custom(minus_features, name, compare_string)) {
3660 warn_report("Ambiguous CPU model string. "
3661 "Don't mix both \"-%s\" and \"%s=%s\"",
3662 name, name, val);
3663 ambiguous = true;
3664 }
3665
3666 /* Special case: */
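/* e.g. "tsc-freq=2.5G" is parsed with a metric suffix and re-registered
 * below as the "tsc-frequency=2500000000" global property. */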
3667 if (!strcmp(name, "tsc-freq")) {
3668 int ret;
3669 uint64_t tsc_freq;
3670
3671 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
3672 if (ret < 0 || tsc_freq > INT64_MAX) {
3673 error_setg(errp, "bad numerical value %s", val);
3674 return;
3675 }
3676 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
3677 val = num;
3678 name = "tsc-frequency";
3679 }
3680
3681 prop = g_new0(typeof(*prop), 1);
3682 prop->driver = typename;
3683 prop->property = g_strdup(name);
3684 prop->value = g_strdup(val);
3685 qdev_prop_register_global(prop);
3686 }
3687
3688 if (ambiguous) {
3689 warn_report("Compatibility of ambiguous CPU model "
3690 "strings won't be kept on future QEMU versions");
3691 }
3692 }
3693
3694 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3695 static int x86_cpu_filter_features(X86CPU *cpu);
3696
3697 /* Build a list with the names of all features in a feature word array */
3698 static void x86_cpu_list_feature_names(FeatureWordArray features,
3699 strList **feat_names)
3700 {
3701 FeatureWord w;
3702 strList **next = feat_names;
3703
3704 for (w = 0; w < FEATURE_WORDS; w++) {
3705 uint32_t filtered = features[w];
3706 int i;
3707 for (i = 0; i < 32; i++) {
3708 if (filtered & (1UL << i)) {
3709 strList *new = g_new0(strList, 1);
3710 new->value = g_strdup(x86_cpu_feature_name(w, i));
3711 *next = new;
3712 next = &new->next;
3713 }
3714 }
3715 }
3716 }
3717
3718 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
3719 const char *name, void *opaque,
3720 Error **errp)
3721 {
3722 X86CPU *xc = X86_CPU(obj);
3723 strList *result = NULL;
3724
3725 x86_cpu_list_feature_names(xc->filtered_features, &result);
3726 visit_type_strList(v, "unavailable-features", &result, errp);
3727 }
3728
3729 /* Check for missing features that may prevent the CPU class from
3730 * running on the current machine and accelerator.
3731 */
3732 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3733 strList **missing_feats)
3734 {
3735 X86CPU *xc;
3736 Error *err = NULL;
3737 strList **next = missing_feats;
3738
3739 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3740 strList *new = g_new0(strList, 1);
3741 new->value = g_strdup("kvm");
3742 *missing_feats = new;
3743 return;
3744 }
3745
3746 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3747
3748 x86_cpu_expand_features(xc, &err);
3749 if (err) {
3750 /* Errors at x86_cpu_expand_features should never happen,
3751 * but if one does, just report the model as not
3752 * runnable at all using the "type" property.
3753 */
3754 strList *new = g_new0(strList, 1);
3755 new->value = g_strdup("type");
3756 *next = new;
3757 next = &new->next;
3758 }
3759
3760 x86_cpu_filter_features(xc);
3761
3762 x86_cpu_list_feature_names(xc->filtered_features, next);
3763
3764 object_unref(OBJECT(xc));
3765 }
3766
3767 /* Print all CPUID feature names in @features, wrapping lines at ~75 columns.
3768 */
3769 static void listflags(GList *features)
3770 {
3771 size_t len = 0;
3772 GList *tmp;
3773
3774 for (tmp = features; tmp; tmp = tmp->next) {
3775 const char *name = tmp->data;
3776 if ((len + strlen(name) + 1) >= 75) {
3777 qemu_printf("\n");
3778 len = 0;
3779 }
3780 qemu_printf("%s%s", len == 0 ? "  " : " ", name);
3781 len += strlen(name) + 1;
3782 }
3783 qemu_printf("\n");
3784 }
3785
3786 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3787 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3788 {
3789 ObjectClass *class_a = (ObjectClass *)a;
3790 ObjectClass *class_b = (ObjectClass *)b;
3791 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3792 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3793 char *name_a, *name_b;
3794 int ret;
3795
3796 if (cc_a->ordering != cc_b->ordering) {
3797 ret = cc_a->ordering - cc_b->ordering;
3798 } else {
3799 name_a = x86_cpu_class_get_model_name(cc_a);
3800 name_b = x86_cpu_class_get_model_name(cc_b);
3801 ret = strcmp(name_a, name_b);
3802 g_free(name_a);
3803 g_free(name_b);
3804 }
3805 return ret;
3806 }
3807
3808 static GSList *get_sorted_cpu_model_list(void)
3809 {
3810 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3811 list = g_slist_sort(list, x86_cpu_list_compare);
3812 return list;
3813 }
3814
3815 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3816 {
3817 ObjectClass *oc = data;
3818 X86CPUClass *cc = X86_CPU_CLASS(oc);
3819 char *name = x86_cpu_class_get_model_name(cc);
3820 const char *desc = cc->model_description;
3821 if (!desc && cc->cpu_def) {
3822 desc = cc->cpu_def->model_id;
3823 }
3824
3825 qemu_printf("x86 %-20s %-48s\n", name, desc);
3826 g_free(name);
3827 }
3828
3829 /* list available CPU models and flags */
3830 void x86_cpu_list(void)
3831 {
3832 int i, j;
3833 GSList *list;
3834 GList *names = NULL;
3835
3836 qemu_printf("Available CPUs:\n");
3837 list = get_sorted_cpu_model_list();
3838 g_slist_foreach(list, x86_cpu_list_entry, NULL);
3839 g_slist_free(list);
3840
3841 names = NULL;
3842 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3843 FeatureWordInfo *fw = &feature_word_info[i];
3844 for (j = 0; j < 32; j++) {
3845 if (fw->feat_names[j]) {
3846 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3847 }
3848 }
3849 }
3850
3851 names = g_list_sort(names, (GCompareFunc)strcmp);
3852
3853 qemu_printf("\nRecognized CPUID flags:\n");
3854 listflags(names);
3855 qemu_printf("\n");
3856 g_list_free(names);
3857 }
3858
3859 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3860 {
3861 ObjectClass *oc = data;
3862 X86CPUClass *cc = X86_CPU_CLASS(oc);
3863 CpuDefinitionInfoList **cpu_list = user_data;
3864 CpuDefinitionInfoList *entry;
3865 CpuDefinitionInfo *info;
3866
3867 info = g_malloc0(sizeof(*info));
3868 info->name = x86_cpu_class_get_model_name(cc);
3869 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3870 info->has_unavailable_features = true;
3871 info->q_typename = g_strdup(object_class_get_name(oc));
3872 info->migration_safe = cc->migration_safe;
3873 info->has_migration_safe = true;
3874 info->q_static = cc->static_model;
3875
3876 entry = g_malloc0(sizeof(*entry));
3877 entry->value = info;
3878 entry->next = *cpu_list;
3879 *cpu_list = entry;
3880 }
3881
3882 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
3883 {
3884 CpuDefinitionInfoList *cpu_list = NULL;
3885 GSList *list = get_sorted_cpu_model_list();
3886 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3887 g_slist_free(list);
3888 return cpu_list;
3889 }
3890
3891 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3892 bool migratable_only)
3893 {
3894 FeatureWordInfo *wi = &feature_word_info[w];
3895 uint32_t r = 0;
3896
3897 if (kvm_enabled()) {
3898 switch (wi->type) {
3899 case CPUID_FEATURE_WORD:
3900 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
3901 wi->cpuid.ecx,
3902 wi->cpuid.reg);
3903 break;
3904 case MSR_FEATURE_WORD:
3905 r = kvm_arch_get_supported_msr_feature(kvm_state,
3906 wi->msr.index);
3907 break;
3908 }
3909 } else if (hvf_enabled()) {
3910 if (wi->type != CPUID_FEATURE_WORD) {
3911 return 0;
3912 }
3913 r = hvf_get_supported_cpuid(wi->cpuid.eax,
3914 wi->cpuid.ecx,
3915 wi->cpuid.reg);
3916 } else if (tcg_enabled()) {
3917 r = wi->tcg_features;
3918 } else {
3919 return ~0;
3920 }
3921 if (migratable_only) {
3922 r &= x86_cpu_get_migratable_flags(w);
3923 }
3924 return r;
3925 }
3926
3927 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3928 {
3929 FeatureWord w;
3930
3931 for (w = 0; w < FEATURE_WORDS; w++) {
3932 report_unavailable_features(w, cpu->filtered_features[w]);
3933 }
3934 }
3935
3936 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3937 {
3938 PropValue *pv;
3939 for (pv = props; pv->prop; pv++) {
3940 if (!pv->value) {
3941 continue;
3942 }
3943 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3944 &error_abort);
3945 }
3946 }
3947
3948 /* Load data from an X86CPUDefinition into an X86CPU object
3949 */
3950 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
3951 {
3952 CPUX86State *env = &cpu->env;
3953 const char *vendor;
3954 char host_vendor[CPUID_VENDOR_SZ + 1];
3955 FeatureWord w;
3956
3957 /* NOTE: any property set by this function should be returned by
3958 * x86_cpu_static_props(), so static expansion of
3959 * query-cpu-model-expansion is always complete.
3960 */
3961
3962 /* CPU models only set _minimum_ values for level/xlevel: */
3963 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
3964 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
3965
3966 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
3967 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
3968 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
3969 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
3970 for (w = 0; w < FEATURE_WORDS; w++) {
3971 env->features[w] = def->features[w];
3972 }
3973
3974 /* legacy-cache defaults to 'off' if CPU model provides cache info */
3975 cpu->legacy_cache = !def->cache_info;
3976
3977 /* Special cases not set in the X86CPUDefinition structs: */
3978 /* TODO: in-kernel irqchip for hvf */
3979 if (kvm_enabled()) {
3980 if (!kvm_irqchip_in_kernel()) {
3981 x86_cpu_change_kvm_default("x2apic", "off");
3982 }
3983
3984 x86_cpu_apply_props(cpu, kvm_default_props);
3985 } else if (tcg_enabled()) {
3986 x86_cpu_apply_props(cpu, tcg_default_props);
3987 }
3988
3989 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
3990
3991 /* sysenter isn't supported in compatibility mode on AMD,
3992 * syscall isn't supported in compatibility mode on Intel.
3993 * Normally we advertise the actual CPU vendor, but you can
3994 * override this using the 'vendor' property if you want to use
3995 * KVM's sysenter/syscall emulation in compatibility mode, or
3996 * when doing cross-vendor migration.
3997 */
3998 vendor = def->vendor;
3999 if (accel_uses_host_cpuid()) {
4000 uint32_t ebx = 0, ecx = 0, edx = 0;
4001 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
4002 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
4003 vendor = host_vendor;
4004 }
4005
4006 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
4007
4008 }
4009
4010 #ifndef CONFIG_USER_ONLY
4011 /* Return a QDict containing keys for all properties that can be included
4012 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
4013 * must be included in the dictionary.
4014 */
4015 static QDict *x86_cpu_static_props(void)
4016 {
4017 FeatureWord w;
4018 int i;
4019 static const char *props[] = {
4020 "min-level",
4021 "min-xlevel",
4022 "family",
4023 "model",
4024 "stepping",
4025 "model-id",
4026 "vendor",
4027 "lmce",
4028 NULL,
4029 };
4030 static QDict *d;
4031
4032 if (d) {
4033 return d;
4034 }
4035
4036 d = qdict_new();
4037 for (i = 0; props[i]; i++) {
4038 qdict_put_null(d, props[i]);
4039 }
4040
4041 for (w = 0; w < FEATURE_WORDS; w++) {
4042 FeatureWordInfo *fi = &feature_word_info[w];
4043 int bit;
4044 for (bit = 0; bit < 32; bit++) {
4045 if (!fi->feat_names[bit]) {
4046 continue;
4047 }
4048 qdict_put_null(d, fi->feat_names[bit]);
4049 }
4050 }
4051
4052 return d;
4053 }
4054
4055 /* Add an entry to the @props dict with the current value of property @prop. */
4056 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
4057 {
4058 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
4059 &error_abort);
4060
4061 qdict_put_obj(props, prop, value);
4062 }
4063
4064 /* Convert CPU model data from X86CPU object to a property dictionary
4065 * that can recreate exactly the same CPU model.
4066 */
4067 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
4068 {
4069 QDict *sprops = x86_cpu_static_props();
4070 const QDictEntry *e;
4071
4072 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
4073 const char *prop = qdict_entry_key(e);
4074 x86_cpu_expand_prop(cpu, props, prop);
4075 }
4076 }
4077
4078 /* Convert CPU model data from X86CPU object to a property dictionary
4079 * that can recreate exactly the same CPU model, including every
4080 * writeable QOM property.
4081 */
4082 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
4083 {
4084 ObjectPropertyIterator iter;
4085 ObjectProperty *prop;
4086
4087 object_property_iter_init(&iter, OBJECT(cpu));
4088 while ((prop = object_property_iter_next(&iter))) {
4089 /* skip read-only or write-only properties */
4090 if (!prop->get || !prop->set) {
4091 continue;
4092 }
4093
4094 /* "hotplugged" is the only property that is configurable
4095 * on the command-line but will be set differently on CPUs
4096 * created using "-cpu ... -smp ..." and by CPUs created
4097 * on the fly by x86_cpu_from_model() for querying. Skip it.
4098 */
4099 if (!strcmp(prop->name, "hotplugged")) {
4100 continue;
4101 }
4102 x86_cpu_expand_prop(cpu, props, prop->name);
4103 }
4104 }
4105
4106 static void object_apply_props(Object *obj, QDict *props, Error **errp)
4107 {
4108 const QDictEntry *prop;
4109 Error *err = NULL;
4110
4111 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
4112 object_property_set_qobject(obj, qdict_entry_value(prop),
4113 qdict_entry_key(prop), &err);
4114 if (err) {
4115 break;
4116 }
4117 }
4118
4119 error_propagate(errp, err);
4120 }
4121
4122 /* Create X86CPU object according to model+props specification */
4123 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
4124 {
4125 X86CPU *xc = NULL;
4126 X86CPUClass *xcc;
4127 Error *err = NULL;
4128
4129 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
4130 if (xcc == NULL) {
4131 error_setg(&err, "CPU model '%s' not found", model);
4132 goto out;
4133 }
4134
4135 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
4136 if (props) {
4137 object_apply_props(OBJECT(xc), props, &err);
4138 if (err) {
4139 goto out;
4140 }
4141 }
4142
4143 x86_cpu_expand_features(xc, &err);
4144 if (err) {
4145 goto out;
4146 }
4147
4148 out:
4149 if (err) {
4150 error_propagate(errp, err);
4151 object_unref(OBJECT(xc));
4152 xc = NULL;
4153 }
4154 return xc;
4155 }
4156
4157 CpuModelExpansionInfo *
4158 qmp_query_cpu_model_expansion(CpuModelExpansionType type,
4159 CpuModelInfo *model,
4160 Error **errp)
4161 {
4162 X86CPU *xc = NULL;
4163 Error *err = NULL;
4164 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
4165 QDict *props = NULL;
4166 const char *base_name;
4167
4168 xc = x86_cpu_from_model(model->name,
4169 model->has_props ?
4170 qobject_to(QDict, model->props) :
4171 NULL, &err);
4172 if (err) {
4173 goto out;
4174 }
4175
4176 props = qdict_new();
4177 ret->model = g_new0(CpuModelInfo, 1);
4178 ret->model->props = QOBJECT(props);
4179 ret->model->has_props = true;
4180
4181 switch (type) {
4182 case CPU_MODEL_EXPANSION_TYPE_STATIC:
4183 /* Static expansion will be based on "base" only */
4184 base_name = "base";
4185 x86_cpu_to_dict(xc, props);
4186 break;
4187 case CPU_MODEL_EXPANSION_TYPE_FULL:
4188 /* As we don't return every single property, full expansion needs
4189 * to keep the original model name+props, and add extra
4190 * properties on top of that.
4191 */
4192 base_name = model->name;
4193 x86_cpu_to_dict_full(xc, props);
4194 break;
4195 default:
4196 error_setg(&err, "Unsupported expansion type");
4197 goto out;
4198 }
4199
4200 x86_cpu_to_dict(xc, props);
4201
4202 ret->model->name = g_strdup(base_name);
4203
4204 out:
4205 object_unref(OBJECT(xc));
4206 if (err) {
4207 error_propagate(errp, err);
4208 qapi_free_CpuModelExpansionInfo(ret);
4209 ret = NULL;
4210 }
4211 return ret;
4212 }
4213 #endif /* !CONFIG_USER_ONLY */
4214
4215 static gchar *x86_gdb_arch_name(CPUState *cs)
4216 {
4217 #ifdef TARGET_X86_64
4218 return g_strdup("i386:x86-64");
4219 #else
4220 return g_strdup("i386");
4221 #endif
4222 }
4223
4224 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
4225 {
4226 X86CPUDefinition *cpudef = data;
4227 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4228
4229 xcc->cpu_def = cpudef;
4230 xcc->migration_safe = true;
4231 }
4232
4233 static void x86_register_cpudef_type(X86CPUDefinition *def)
4234 {
4235 char *typename = x86_cpu_type_name(def->name);
4236 TypeInfo ti = {
4237 .name = typename,
4238 .parent = TYPE_X86_CPU,
4239 .class_init = x86_cpu_cpudef_class_init,
4240 .class_data = def,
4241 };
4242
4243 /* AMD aliases are handled at runtime based on CPUID vendor, so
4244 * they shouldn't be set in the CPU model table.
4245 */
4246 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
4247 /* catch mistakes instead of silently truncating model_id when too long */
4248 assert(def->model_id && strlen(def->model_id) <= 48);
4249
4250
4251 type_register(&ti);
4252 g_free(typename);
4253 }
4254
4255 #if !defined(CONFIG_USER_ONLY)
4256
4257 void cpu_clear_apic_feature(CPUX86State *env)
4258 {
4259 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
4260 }
4261
4262 #endif /* !CONFIG_USER_ONLY */
4263
4264 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
4265 uint32_t *eax, uint32_t *ebx,
4266 uint32_t *ecx, uint32_t *edx)
4267 {
4268 X86CPU *cpu = env_archcpu(env);
4269 CPUState *cs = env_cpu(env);
4270 uint32_t pkg_offset;
4271 uint32_t limit;
4272 uint32_t signature[3];
4273
4274 /* Calculate & apply limits for different index ranges */
4275 if (index >= 0xC0000000) {
4276 limit = env->cpuid_xlevel2;
4277 } else if (index >= 0x80000000) {
4278 limit = env->cpuid_xlevel;
4279 } else if (index >= 0x40000000) {
4280 limit = 0x40000001;
4281 } else {
4282 limit = env->cpuid_level;
4283 }
4284
4285 if (index > limit) {
4286 /* Intel documentation states that invalid EAX input will
4287 * return the same information as EAX=cpuid_level
4288 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
4289 */
4290 index = env->cpuid_level;
4291 }
4292
4293 switch (index) {
4294 case 0:
4295 *eax = env->cpuid_level;
4296 *ebx = env->cpuid_vendor1;
4297 *edx = env->cpuid_vendor2;
4298 *ecx = env->cpuid_vendor3;
4299 break;
4300 case 1:
4301 *eax = env->cpuid_version;
4302 *ebx = (cpu->apic_id << 24) |
4303 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
4304 *ecx = env->features[FEAT_1_ECX];
4305 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
4306 *ecx |= CPUID_EXT_OSXSAVE;
4307 }
4308 *edx = env->features[FEAT_1_EDX];
4309 if (cs->nr_cores * cs->nr_threads > 1) {
4310 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
4311 *edx |= CPUID_HT;
4312 }
4313 break;
4314 case 2:
4315 /* cache info: needed for Pentium Pro compatibility */
4316 if (cpu->cache_info_passthrough) {
4317 host_cpuid(index, 0, eax, ebx, ecx, edx);
4318 break;
4319 }
4320 *eax = 1; /* Number of CPUID[EAX=2] calls required */
4321 *ebx = 0;
4322 if (!cpu->enable_l3_cache) {
4323 *ecx = 0;
4324 } else {
4325 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
4326 }
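/* EDX packs the L1D, L1I and L2 descriptors in bits 23:16, 15:8 and 7:0;
 * ECX above carries the L3 descriptor (or 0 when L3 is disabled). */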
4327 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
4328 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
4329 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
4330 break;
4331 case 4:
4332 /* cache info: needed for Core compatibility */
4333 if (cpu->cache_info_passthrough) {
4334 host_cpuid(index, count, eax, ebx, ecx, edx);
4335 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
4336 *eax &= ~0xFC000000;
4337 if ((*eax & 31) && cs->nr_cores > 1) {
4338 *eax |= (cs->nr_cores - 1) << 26;
4339 }
4340 } else {
4341 *eax = 0;
4342 switch (count) {
4343 case 0: /* L1 dcache info */
4344 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
4345 1, cs->nr_cores,
4346 eax, ebx, ecx, edx);
4347 break;
4348 case 1: /* L1 icache info */
4349 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
4350 1, cs->nr_cores,
4351 eax, ebx, ecx, edx);
4352 break;
4353 case 2: /* L2 cache info */
4354 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
4355 cs->nr_threads, cs->nr_cores,
4356 eax, ebx, ecx, edx);
4357 break;
4358 case 3: /* L3 cache info */
4359 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
4360 if (cpu->enable_l3_cache) {
4361 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
4362 (1 << pkg_offset), cs->nr_cores,
4363 eax, ebx, ecx, edx);
4364 break;
4365 }
4366 /* fall through */
4367 default: /* end of info */
4368 *eax = *ebx = *ecx = *edx = 0;
4369 break;
4370 }
4371 }
4372 break;
4373 case 5:
4374 /* MONITOR/MWAIT Leaf */
4375 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
4376 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
4377 *ecx = cpu->mwait.ecx; /* flags */
4378 *edx = cpu->mwait.edx; /* mwait substates */
4379 break;
4380 case 6:
4381 /* Thermal and Power Leaf */
4382 *eax = env->features[FEAT_6_EAX];
4383 *ebx = 0;
4384 *ecx = 0;
4385 *edx = 0;
4386 break;
4387 case 7:
4388 /* Structured Extended Feature Flags Enumeration Leaf */
4389 if (count == 0) {
4390 *eax = 0; /* Maximum ECX value for sub-leaves */
4391 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
4392 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
4393 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
4394 *ecx |= CPUID_7_0_ECX_OSPKE;
4395 }
4396 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
4397 } else {
4398 *eax = 0;
4399 *ebx = 0;
4400 *ecx = 0;
4401 *edx = 0;
4402 }
4403 break;
4404 case 9:
4405 /* Direct Cache Access Information Leaf */
4406 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
4407 *ebx = 0;
4408 *ecx = 0;
4409 *edx = 0;
4410 break;
4411 case 0xA:
4412 /* Architectural Performance Monitoring Leaf */
4413 if (kvm_enabled() && cpu->enable_pmu) {
4414 KVMState *s = cs->kvm_state;
4415
4416 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
4417 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
4418 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
4419 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
4420 } else if (hvf_enabled() && cpu->enable_pmu) {
4421 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
4422 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
4423 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
4424 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
4425 } else {
4426 *eax = 0;
4427 *ebx = 0;
4428 *ecx = 0;
4429 *edx = 0;
4430 }
4431 break;
4432 case 0xB:
4433 /* Extended Topology Enumeration Leaf */
4434 if (!cpu->enable_cpuid_0xb) {
4435 *eax = *ebx = *ecx = *edx = 0;
4436 break;
4437 }
4438
4439 *ecx = count & 0xff;
4440 *edx = cpu->apic_id;
4441
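/* EAX is the number of APIC ID bits to shift right to reach the next
 * topology level; e.g. with 2 cores x 2 threads, sub-leaf 0 returns
 * EAX=1/EBX=2 (SMT level) and sub-leaf 1 returns EAX=2/EBX=4 (core level). */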
4442 switch (count) {
4443 case 0:
4444 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
4445 *ebx = cs->nr_threads;
4446 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
4447 break;
4448 case 1:
4449 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
4450 *ebx = cs->nr_cores * cs->nr_threads;
4451 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
4452 break;
4453 default:
4454 *eax = 0;
4455 *ebx = 0;
4456 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
4457 }
4458
4459 assert(!(*eax & ~0x1f));
4460 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
4461 break;
4462 case 0xD: {
4463 /* Processor Extended State */
4464 *eax = 0;
4465 *ebx = 0;
4466 *ecx = 0;
4467 *edx = 0;
4468 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4469 break;
4470 }
4471
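/* Sub-leaf 0: EAX/EDX report the supported XSAVE component bitmap, EBX the
 * save area size for the components currently enabled in XCR0, and ECX the
 * size needed if all supported components were enabled. */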
4472 if (count == 0) {
4473 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
4474 *eax = env->features[FEAT_XSAVE_COMP_LO];
4475 *edx = env->features[FEAT_XSAVE_COMP_HI];
4476 *ebx = xsave_area_size(env->xcr0);
4477 } else if (count == 1) {
4478 *eax = env->features[FEAT_XSAVE];
4479 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
4480 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
4481 const ExtSaveArea *esa = &x86_ext_save_areas[count];
4482 *eax = esa->size;
4483 *ebx = esa->offset;
4484 }
4485 }
4486 break;
4487 }
4488 case 0x14: {
4489 /* Intel Processor Trace Enumeration */
4490 *eax = 0;
4491 *ebx = 0;
4492 *ecx = 0;
4493 *edx = 0;
4494 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
4495 !kvm_enabled()) {
4496 break;
4497 }
4498
4499 if (count == 0) {
4500 *eax = INTEL_PT_MAX_SUBLEAF;
4501 *ebx = INTEL_PT_MINIMAL_EBX;
4502 *ecx = INTEL_PT_MINIMAL_ECX;
4503 } else if (count == 1) {
4504 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
4505 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
4506 }
4507 break;
4508 }
4509 case 0x40000000:
4510 /*
4511 * CPUID code in kvm_arch_init_vcpu() ignores stuff
4512 * set here, but we restrict this to TCG nonetheless.
4513 */
4514 if (tcg_enabled() && cpu->expose_tcg) {
4515 memcpy(signature, "TCGTCGTCGTCG", 12);
4516 *eax = 0x40000001;
4517 *ebx = signature[0];
4518 *ecx = signature[1];
4519 *edx = signature[2];
4520 } else {
4521 *eax = 0;
4522 *ebx = 0;
4523 *ecx = 0;
4524 *edx = 0;
4525 }
4526 break;
4527 case 0x40000001:
4528 *eax = 0;
4529 *ebx = 0;
4530 *ecx = 0;
4531 *edx = 0;
4532 break;
4533 case 0x80000000:
4534 *eax = env->cpuid_xlevel;
4535 *ebx = env->cpuid_vendor1;
4536 *edx = env->cpuid_vendor2;
4537 *ecx = env->cpuid_vendor3;
4538 break;
4539 case 0x80000001:
4540 *eax = env->cpuid_version;
4541 *ebx = 0;
4542 *ecx = env->features[FEAT_8000_0001_ECX];
4543 *edx = env->features[FEAT_8000_0001_EDX];
4544
4545 /* The Linux kernel checks for the CMPLegacy bit and
4546 * discards multiple thread information if it is set.
4547 * So don't set it here for Intel to make Linux guests happy.
4548 */
4549 if (cs->nr_cores * cs->nr_threads > 1) {
4550 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
4551 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
4552 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
4553 *ecx |= 1 << 1; /* CmpLegacy bit */
4554 }
4555 }
4556 break;
4557 case 0x80000002:
4558 case 0x80000003:
4559 case 0x80000004:
4560 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
4561 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
4562 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
4563 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
4564 break;
4565 case 0x80000005:
4566 /* cache info (L1 cache) */
4567 if (cpu->cache_info_passthrough) {
4568 host_cpuid(index, 0, eax, ebx, ecx, edx);
4569 break;
4570 }
4571 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
4572 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
4573 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
4574 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
4575 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
4576 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
4577 break;
4578 case 0x80000006:
4579 /* cache info (L2 cache) */
4580 if (cpu->cache_info_passthrough) {
4581 host_cpuid(index, 0, eax, ebx, ecx, edx);
4582 break;
4583 }
4584 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
4585 (L2_DTLB_2M_ENTRIES << 16) | \
4586 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
4587 (L2_ITLB_2M_ENTRIES);
4588 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
4589 (L2_DTLB_4K_ENTRIES << 16) | \
4590 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
4591 (L2_ITLB_4K_ENTRIES);
4592 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
4593 cpu->enable_l3_cache ?
4594 env->cache_info_amd.l3_cache : NULL,
4595 ecx, edx);
4596 break;
4597 case 0x80000007:
4598 *eax = 0;
4599 *ebx = 0;
4600 *ecx = 0;
4601 *edx = env->features[FEAT_8000_0007_EDX];
4602 break;
4603 case 0x80000008:
4604 /* virtual & phys address size in low 2 bytes. */
4605 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4606 /* 64 bit processor */
4607 *eax = cpu->phys_bits; /* configurable physical bits */
4608 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
4609 *eax |= 0x00003900; /* 57 bits virtual */
4610 } else {
4611 *eax |= 0x00003000; /* 48 bits virtual */
4612 }
4613 } else {
4614 *eax = cpu->phys_bits;
4615 }
4616 *ebx = env->features[FEAT_8000_0008_EBX];
4617 *ecx = 0;
4618 *edx = 0;
4619 if (cs->nr_cores * cs->nr_threads > 1) {
4620 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
4621 }
4622 break;
4623 case 0x8000000A:
4624 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4625 *eax = 0x00000001; /* SVM Revision */
4626 *ebx = 0x00000010; /* nr of ASIDs */
4627 *ecx = 0;
4628 *edx = env->features[FEAT_SVM]; /* optional features */
4629 } else {
4630 *eax = 0;
4631 *ebx = 0;
4632 *ecx = 0;
4633 *edx = 0;
4634 }
4635 break;
4636 case 0x8000001D:
4637 *eax = 0;
4638 if (cpu->cache_info_passthrough) {
4639 host_cpuid(index, count, eax, ebx, ecx, edx);
4640 break;
4641 }
4642 switch (count) {
4643 case 0: /* L1 dcache info */
4644 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
4645 eax, ebx, ecx, edx);
4646 break;
4647 case 1: /* L1 icache info */
4648 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
4649 eax, ebx, ecx, edx);
4650 break;
4651 case 2: /* L2 cache info */
4652 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
4653 eax, ebx, ecx, edx);
4654 break;
4655 case 3: /* L3 cache info */
4656 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
4657 eax, ebx, ecx, edx);
4658 break;
4659 default: /* end of info */
4660 *eax = *ebx = *ecx = *edx = 0;
4661 break;
4662 }
4663 break;
4664 case 0x8000001E:
4665 assert(cpu->core_id <= 255);
4666 encode_topo_cpuid8000001e(cs, cpu,
4667 eax, ebx, ecx, edx);
4668 break;
4669 case 0xC0000000:
4670 *eax = env->cpuid_xlevel2;
4671 *ebx = 0;
4672 *ecx = 0;
4673 *edx = 0;
4674 break;
4675 case 0xC0000001:
4676 /* Support for the VIA CPUs' CPUID instruction */
4677 *eax = env->cpuid_version;
4678 *ebx = 0;
4679 *ecx = 0;
4680 *edx = env->features[FEAT_C000_0001_EDX];
4681 break;
4682 case 0xC0000002:
4683 case 0xC0000003:
4684 case 0xC0000004:
4685 /* Reserved for future use; currently filled with zeros */
4686 *eax = 0;
4687 *ebx = 0;
4688 *ecx = 0;
4689 *edx = 0;
4690 break;
4691 case 0x8000001F:
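/* AMD memory encryption leaf: EAX bit 1 flags SEV support, EBX[5:0] is the
 * C-bit position and EBX[11:6] the number of physical address bits lost
 * to encryption. */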
4692 *eax = sev_enabled() ? 0x2 : 0;
4693 *ebx = sev_get_cbit_position();
4694 *ebx |= sev_get_reduced_phys_bits() << 6;
4695 *ecx = 0;
4696 *edx = 0;
4697 break;
4698 default:
4699 /* reserved values: zero */
4700 *eax = 0;
4701 *ebx = 0;
4702 *ecx = 0;
4703 *edx = 0;
4704 break;
4705 }
4706 }
4707
4708 /* CPUClass::reset() */
4709 static void x86_cpu_reset(CPUState *s)
4710 {
4711 X86CPU *cpu = X86_CPU(s);
4712 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
4713 CPUX86State *env = &cpu->env;
4714 target_ulong cr4;
4715 uint64_t xcr0;
4716 int i;
4717
4718 xcc->parent_reset(s);
4719
4720 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
4721
4722 env->old_exception = -1;
4723
4724 /* init to reset state */
4725
4726 env->hflags2 |= HF2_GIF_MASK;
4727
4728 cpu_x86_update_cr0(env, 0x60000010);
4729 env->a20_mask = ~0x0;
4730 env->smbase = 0x30000;
4731 env->msr_smi_count = 0;
4732
4733 env->idt.limit = 0xffff;
4734 env->gdt.limit = 0xffff;
4735 env->ldt.limit = 0xffff;
4736 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
4737 env->tr.limit = 0xffff;
4738 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
4739
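/* With EIP = 0xfff0 (set below), CS.base = 0xffff0000 makes the first
 * instruction fetch hit the architectural reset vector 0xfffffff0. */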
4740 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
4741 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
4742 DESC_R_MASK | DESC_A_MASK);
4743 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
4744 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4745 DESC_A_MASK);
4746 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
4747 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4748 DESC_A_MASK);
4749 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
4750 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4751 DESC_A_MASK);
4752 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
4753 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4754 DESC_A_MASK);
4755 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
4756 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4757 DESC_A_MASK);
4758
4759 env->eip = 0xfff0;
4760 env->regs[R_EDX] = env->cpuid_version;
4761
4762 env->eflags = 0x2;
4763
4764 /* FPU init */
4765 for (i = 0; i < 8; i++) {
4766 env->fptags[i] = 1;
4767 }
4768 cpu_set_fpuc(env, 0x37f);
4769
4770 env->mxcsr = 0x1f80;
4771 /* All units are in INIT state. */
4772 env->xstate_bv = 0;
4773
4774 env->pat = 0x0007040600070406ULL;
4775 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
4776 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
4777 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
4778 }
4779
4780 memset(env->dr, 0, sizeof(env->dr));
4781 env->dr[6] = DR6_FIXED_1;
4782 env->dr[7] = DR7_FIXED_1;
4783 cpu_breakpoint_remove_all(s, BP_CPU);
4784 cpu_watchpoint_remove_all(s, BP_CPU);
4785
4786 cr4 = 0;
4787 xcr0 = XSTATE_FP_MASK;
4788
4789 #ifdef CONFIG_USER_ONLY
4790 /* Enable all the features for user-mode. */
4791 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4792 xcr0 |= XSTATE_SSE_MASK;
4793 }
4794 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4795 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4796 if (env->features[esa->feature] & esa->bits) {
4797 xcr0 |= 1ull << i;
4798 }
4799 }
4800
4801 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
4802 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
4803 }
4804 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
4805 cr4 |= CR4_FSGSBASE_MASK;
4806 }
4807 #endif
4808
4809 env->xcr0 = xcr0;
4810 cpu_x86_update_cr4(env, cr4);
4811
4812 /*
4813 * SDM 11.11.5 requires:
4814 * - IA32_MTRR_DEF_TYPE MSR.E = 0
4815 * - IA32_MTRR_PHYSMASKn.V = 0
4816 * All other bits are undefined. For simplification, zero it all.
4817 */
4818 env->mtrr_deftype = 0;
4819 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
4820 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
4821
4822 env->interrupt_injected = -1;
4823 env->exception_nr = -1;
4824 env->exception_pending = 0;
4825 env->exception_injected = 0;
4826 env->exception_has_payload = false;
4827 env->exception_payload = 0;
4828 env->nmi_injected = false;
4829 #if !defined(CONFIG_USER_ONLY)
4830 /* We hard-wire the BSP to the first CPU. */
4831 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
4832
4833 s->halted = !cpu_is_bsp(cpu);
4834
4835 if (kvm_enabled()) {
4836 kvm_arch_reset_vcpu(cpu);
4837 }
4838 else if (hvf_enabled()) {
4839 hvf_reset_vcpu(s);
4840 }
4841 #endif
4842 }
4843
4844 #ifndef CONFIG_USER_ONLY
4845 bool cpu_is_bsp(X86CPU *cpu)
4846 {
4847 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4848 }
4849
4850 /* TODO: remove me, when reset over QOM tree is implemented */
4851 static void x86_cpu_machine_reset_cb(void *opaque)
4852 {
4853 X86CPU *cpu = opaque;
4854 cpu_reset(CPU(cpu));
4855 }
4856 #endif
4857
4858 static void mce_init(X86CPU *cpu)
4859 {
4860 CPUX86State *cenv = &cpu->env;
4861 unsigned int bank;
4862
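/* MCG_CAP keeps the bank count in its low byte (MCE_BANKS_DEF), combined
 * with the capability flags from MCE_CAP_DEF and, when enabled, LMCE. */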
4863 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4864 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4865 (CPUID_MCE | CPUID_MCA)) {
4866 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4867 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4868 cenv->mcg_ctl = ~(uint64_t)0;
4869 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4870 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4871 }
4872 }
4873 }
4874
4875 #ifndef CONFIG_USER_ONLY
4876 APICCommonClass *apic_get_class(void)
4877 {
4878 const char *apic_type = "apic";
4879
4880 /* TODO: in-kernel irqchip for hvf */
4881 if (kvm_apic_in_kernel()) {
4882 apic_type = "kvm-apic";
4883 } else if (xen_enabled()) {
4884 apic_type = "xen-apic";
4885 }
4886
4887 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4888 }
4889
4890 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
4891 {
4892 APICCommonState *apic;
4893 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
4894
4895 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
4896
4897 object_property_add_child(OBJECT(cpu), "lapic",
4898 OBJECT(cpu->apic_state), &error_abort);
4899 object_unref(OBJECT(cpu->apic_state));
4900
4901 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
4902 /* TODO: convert to link<> */
4903 apic = APIC_COMMON(cpu->apic_state);
4904 apic->cpu = cpu;
4905 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
4906 }
4907
4908 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4909 {
4910 APICCommonState *apic;
4911 static bool apic_mmio_map_once;
4912
4913 if (cpu->apic_state == NULL) {
4914 return;
4915 }
4916 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
4917 errp);
4918
4919 /* Map APIC MMIO area */
4920 apic = APIC_COMMON(cpu->apic_state);
4921 if (!apic_mmio_map_once) {
4922 memory_region_add_subregion_overlap(get_system_memory(),
4923 apic->apicbase &
4924 MSR_IA32_APICBASE_BASE,
4925 &apic->io_memory,
4926 0x1000);
4927 apic_mmio_map_once = true;
4928 }
4929 }
4930
4931 static void x86_cpu_machine_done(Notifier *n, void *unused)
4932 {
4933 X86CPU *cpu = container_of(n, X86CPU, machine_done);
4934 MemoryRegion *smram =
4935 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4936
4937 if (smram) {
4938 cpu->smram = g_new(MemoryRegion, 1);
4939 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4940 smram, 0, 1ull << 32);
4941 memory_region_set_enabled(cpu->smram, true);
4942 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
4943 }
4944 }
4945 #else
4946 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4947 {
4948 }
4949 #endif
4950
4951 /* Note: Only safe for use on x86(-64) hosts */
4952 static uint32_t x86_host_phys_bits(void)
4953 {
4954 uint32_t eax;
4955 uint32_t host_phys_bits;
4956
4957 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4958 if (eax >= 0x80000008) {
4959 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4960 /* Note: According to AMD doc 25481 rev 2.34 they have a field
4961 * at 23:16 that can specify the maximum physical address bits for
4962 * the guest, overriding this value; but I've not seen
4963 * anything with that set.
4964 */
4965 host_phys_bits = eax & 0xff;
4966 } else {
4967 /* It's an odd 64-bit machine that doesn't have the leaf for
4968 * physical address bits; fall back to 36, which matches most
4969 * older Intel CPUs.
4970 */
4971 host_phys_bits = 36;
4972 }
4973
4974 return host_phys_bits;
4975 }
4976
4977 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4978 {
4979 if (*min < value) {
4980 *min = value;
4981 }
4982 }
4983
4984 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4985 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4986 {
4987 CPUX86State *env = &cpu->env;
4988 FeatureWordInfo *fi = &feature_word_info[w];
4989 uint32_t eax = fi->cpuid.eax;
4990 uint32_t region = eax & 0xF0000000;
4991
4992 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
4993 if (!env->features[w]) {
4994 return;
4995 }
4996
4997 switch (region) {
4998 case 0x00000000:
4999 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
5000 break;
5001 case 0x80000000:
5002 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
5003 break;
5004 case 0xC0000000:
5005 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
5006 break;
5007 }
5008 }
5009
5010 /* Calculate XSAVE components based on the configured CPU feature flags */
5011 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
5012 {
5013 CPUX86State *env = &cpu->env;
5014 int i;
5015 uint64_t mask;
5016
5017 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
5018 return;
5019 }
5020
5021 mask = 0;
5022 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
5023 const ExtSaveArea *esa = &x86_ext_save_areas[i];
5024 if (env->features[esa->feature] & esa->bits) {
5025 mask |= (1ULL << i);
5026 }
5027 }
5028
5029 env->features[FEAT_XSAVE_COMP_LO] = mask;
5030 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
5031 }
5032
5033 /***** Steps involved in loading and filtering CPUID data
5034 *
5035 * When initializing and realizing a CPU object, the steps
5036 * involved in setting up CPUID data are:
5037 *
5038 * 1) Loading CPU model definition (X86CPUDefinition). This is
5039 * implemented by x86_cpu_load_def() and should be completely
5040 * transparent, as it is done automatically by instance_init.
5041 * No code should need to look at X86CPUDefinition structs
5042 * outside instance_init.
5043 *
5044 * 2) CPU expansion. This is done by realize before CPUID
5045 * filtering, and will make sure host/accelerator data is
5046 * loaded for CPU models that depend on host capabilities
5047 * (e.g. "host"). Done by x86_cpu_expand_features().
5048 *
5049 * 3) CPUID filtering. This initializes extra data related to
5050 * CPUID, and checks if the host supports all capabilities
5051 * required by the CPU. Runnability of a CPU model is
5052 * determined at this step. Done by x86_cpu_filter_features().
5053 *
5054 * Some operations don't require all steps to be performed.
5055 * More precisely:
5056 *
5057 * - CPU instance creation (instance_init) will run only CPU
5058 * model loading. CPU expansion can't run at instance_init-time
5059 * because host/accelerator data may not be available yet.
5060 * - CPU realization will perform both CPU model expansion and CPUID
5061 * filtering, and return an error in case one of them fails.
5062 * - query-cpu-definitions needs to run all 3 steps. It needs
5063 * to run CPUID filtering, as the 'unavailable-features'
5064 * field is set based on the filtering results.
5065 * - The query-cpu-model-expansion QMP command only needs to run
5066 * CPU model loading and CPU expansion. It should not filter
5067 * any CPUID data based on host capabilities.
5068 */
5069
5070 /* Expand CPU configuration data, based on configured features
5071 * and host/accelerator capabilities when appropriate.
5072 */
5073 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
5074 {
5075 CPUX86State *env = &cpu->env;
5076 FeatureWord w;
5077 GList *l;
5078 Error *local_err = NULL;
5079
5080 /* TODO: Now that cpu->max_features doesn't overwrite features
5081 * set using QOM properties, we can convert
5082 * plus_features & minus_features to global properties
5083 * inside x86_cpu_parse_featurestr() too.
5084 */
5085 if (cpu->max_features) {
5086 for (w = 0; w < FEATURE_WORDS; w++) {
5087 /* Override only features that weren't set explicitly
5088 * by the user.
5089 */
5090 env->features[w] |=
5091 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
5092 ~env->user_features[w] &
5093 ~feature_word_info[w].no_autoenable_flags;
5094 }
5095 }
5096
5097 for (l = plus_features; l; l = l->next) {
5098 const char *prop = l->data;
5099 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
5100 if (local_err) {
5101 goto out;
5102 }
5103 }
5104
5105 for (l = minus_features; l; l = l->next) {
5106 const char *prop = l->data;
5107 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
5108 if (local_err) {
5109 goto out;
5110 }
5111 }
5112
5113 if (!kvm_enabled() || !cpu->expose_kvm) {
5114 env->features[FEAT_KVM] = 0;
5115 }
5116
5117 x86_cpu_enable_xsave_components(cpu);
5118
5119 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
5120 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
5121 if (cpu->full_cpuid_auto_level) {
5122 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
5123 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
5124 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
5125 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
5126 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
5127 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
5128 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
5129 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
5130 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
5131 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
5132 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
5133
5134 /* Intel Processor Trace requires CPUID[0x14] */
5135 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
5136 kvm_enabled() && cpu->intel_pt_auto_level) {
5137 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
5138 }
5139
5140 /* SVM requires CPUID[0x8000000A] */
5141 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
5142 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
5143 }
5144
5145 /* SEV requires CPUID[0x8000001F] */
5146 if (sev_enabled()) {
5147 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
5148 }
5149 }
5150
5151 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
5152 if (env->cpuid_level == UINT32_MAX) {
5153 env->cpuid_level = env->cpuid_min_level;
5154 }
5155 if (env->cpuid_xlevel == UINT32_MAX) {
5156 env->cpuid_xlevel = env->cpuid_min_xlevel;
5157 }
5158 if (env->cpuid_xlevel2 == UINT32_MAX) {
5159 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
5160 }
5161
5162 out:
5163 if (local_err != NULL) {
5164 error_propagate(errp, local_err);
5165 }
5166 }
5167
5168 /*
5169 * Finishes initialization of CPUID data, filters CPU feature
5170 * words based on host availability of each feature.
5171 *
5172 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
5173 */
5174 static int x86_cpu_filter_features(X86CPU *cpu)
5175 {
5176 CPUX86State *env = &cpu->env;
5177 FeatureWord w;
5178 int rv = 0;
5179
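/*
 * Drop every requested feature bit the accelerator cannot provide, and
 * remember the dropped bits in filtered_features for later reporting.
 */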
5180 for (w = 0; w < FEATURE_WORDS; w++) {
5181 uint32_t host_feat =
5182 x86_cpu_get_supported_feature_word(w, false);
5183 uint32_t requested_features = env->features[w];
5184 env->features[w] &= host_feat;
5185 cpu->filtered_features[w] = requested_features & ~env->features[w];
5186 if (cpu->filtered_features[w]) {
5187 rv = 1;
5188 }
5189 }
5190
5191 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
5192 kvm_enabled()) {
5193 KVMState *s = CPU(cpu)->kvm_state;
5194 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
5195 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
5196 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
5197 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
5198 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
5199
5200 if (!eax_0 ||
5201 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
5202 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
5203 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
5204 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
5205 INTEL_PT_ADDR_RANGES_NUM) ||
5206 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
5207 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
5208 (ecx_0 & INTEL_PT_IP_LIP)) {
5209 /*
5210 * Processor Trace capabilities aren't configurable, so if the
5211 * host can't emulate the capabilities we report in
5212 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
5213 */
5214 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
5215 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
5216 rv = 1;
5217 }
5218 }
5219
5220 return rv;
5221 }
5222
5223 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
5224 {
5225 CPUState *cs = CPU(dev);
5226 X86CPU *cpu = X86_CPU(dev);
5227 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5228 CPUX86State *env = &cpu->env;
5229 Error *local_err = NULL;
5230 static bool ht_warned;
5231
5232 if (xcc->host_cpuid_required) {
5233 if (!accel_uses_host_cpuid()) {
5234 char *name = x86_cpu_class_get_model_name(xcc);
5235 error_setg(&local_err, "CPU model '%s' requires KVM", name);
5236 g_free(name);
5237 goto out;
5238 }
5239
5240 if (enable_cpu_pm) {
5241 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
5242 &cpu->mwait.ecx, &cpu->mwait.edx);
5243 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
5244 }
5245 }
5246
5247 /* mwait extended info: needed for Core compatibility */
5248 /* We always wake on interrupt even if host does not have the capability */
5249 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
5250
5251 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
5252 error_setg(errp, "apic-id property was not initialized properly");
5253 return;
5254 }
5255
5256 x86_cpu_expand_features(cpu, &local_err);
5257 if (local_err) {
5258 goto out;
5259 }
5260
5261 if (x86_cpu_filter_features(cpu) &&
5262 (cpu->check_cpuid || cpu->enforce_cpuid)) {
5263 x86_cpu_report_filtered_features(cpu);
5264 if (cpu->enforce_cpuid) {
5265 error_setg(&local_err,
5266 accel_uses_host_cpuid() ?
5267 "Host doesn't support requested features" :
5268 "TCG doesn't support requested features");
5269 goto out;
5270 }
5271 }
5272
5273 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
5274 * CPUID[1].EDX.
5275 */
5276 if (IS_AMD_CPU(env)) {
5277 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
5278 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
5279 & CPUID_EXT2_AMD_ALIASES);
5280 }
5281
5282 /* For 64-bit systems, think about the number of physical bits to present.
5283 * Ideally this should be the same as the host; anything other than matching
5284 * the host can cause incorrect guest behaviour.
5285 * QEMU used to pick the magic value of 40 bits, which corresponds to
5286 * consumer AMD devices but nothing else.
5287 */
5288 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
5289 if (accel_uses_host_cpuid()) {
5290 uint32_t host_phys_bits = x86_host_phys_bits();
5291 static bool warned;
5292
5293 if (cpu->host_phys_bits) {
5294 /* The user asked for us to use the host physical bits */
5295 cpu->phys_bits = host_phys_bits;
5296 if (cpu->host_phys_bits_limit &&
5297 cpu->phys_bits > cpu->host_phys_bits_limit) {
5298 cpu->phys_bits = cpu->host_phys_bits_limit;
5299 }
5300 }
5301
5302 /* Print a warning if the user set it to a value that's not the
5303 * host value.
5304 */
5305 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
5306 !warned) {
5307 warn_report("Host physical bits (%u)"
5308 " does not match phys-bits property (%u)",
5309 host_phys_bits, cpu->phys_bits);
5310 warned = true;
5311 }
5312
5313 if (cpu->phys_bits &&
5314 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
5315 cpu->phys_bits < 32)) {
5316 error_setg(errp, "phys-bits should be between 32 and %u "
5317 " (but is %u)",
5318 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
5319 return;
5320 }
5321 } else {
5322 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
5323 error_setg(errp, "TCG only supports phys-bits=%u",
5324 TCG_PHYS_ADDR_BITS);
5325 return;
5326 }
5327 }
5328 /* 0 means it was not explicitly set by the user (or by machine
5329 * compat_props or by the host code above). In this case, the default
5330 * is the value used by TCG (40).
5331 */
5332 if (cpu->phys_bits == 0) {
5333 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
5334 }
5335 } else {
5336 /* For 32-bit systems, don't use the user-set value, but keep
5337 * phys_bits consistent with what we tell the guest.
5338 */
5339 if (cpu->phys_bits != 0) {
5340 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
5341 return;
5342 }
5343
5344 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
5345 cpu->phys_bits = 36;
5346 } else {
5347 cpu->phys_bits = 32;
5348 }
5349 }
5350
5351 /* Cache information initialization */
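/*
 * Note (added): legacy-cache=on keeps the hard-coded legacy_* cache
 * descriptors below, which older QEMU versions always exposed;
 * legacy-cache=off uses the cache description from the CPU model
 * definition instead.
 */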
5352 if (!cpu->legacy_cache) {
5353 if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
5354 char *name = x86_cpu_class_get_model_name(xcc);
5355 error_setg(errp,
5356 "CPU model '%s' doesn't support legacy-cache=off", name);
5357 g_free(name);
5358 return;
5359 }
5360 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
5361 *xcc->cpu_def->cache_info;
5362 } else {
5363 /* Build legacy cache information */
5364 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
5365 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
5366 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
5367 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
5368
5369 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
5370 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
5371 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
5372 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
5373
5374 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
5375 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
5376 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
5377 env->cache_info_amd.l3_cache = &legacy_l3_cache;
5378 }
5379
5380
5381 cpu_exec_realizefn(cs, &local_err);
5382 if (local_err != NULL) {
5383 error_propagate(errp, local_err);
5384 return;
5385 }
5386
5387 #ifndef CONFIG_USER_ONLY
5388 MachineState *ms = MACHINE(qdev_get_machine());
5389 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
5390
5391 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
5392 x86_cpu_apic_create(cpu, &local_err);
5393 if (local_err != NULL) {
5394 goto out;
5395 }
5396 }
5397 #endif
5398
5399 mce_init(cpu);
5400
5401 #ifndef CONFIG_USER_ONLY
5402 if (tcg_enabled()) {
5403 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
5404 cpu->cpu_as_root = g_new(MemoryRegion, 1);
5405
5406 /* Outer container... */
5407 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
5408 memory_region_set_enabled(cpu->cpu_as_root, true);
5409
5410 /* ... with two regions inside: normal system memory with low
5411 * priority, and...
5412 */
5413 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
5414 get_system_memory(), 0, ~0ull);
5415 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
5416 memory_region_set_enabled(cpu->cpu_as_mem, true);
5417
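/*
 * Two CPU address spaces: index 0 is the regular view of system memory;
 * index 1 ("cpu-smm") is rooted at cpu_as_root, where SMRAM is mapped
 * later (see the machine_done notifier below) and which is used for
 * accesses made while the CPU is in SMM.
 */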
5418 cs->num_ases = 2;
5419 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
5420 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
5421
5422 /* ... SMRAM with higher priority, linked from /machine/smram. */
5423 cpu->machine_done.notify = x86_cpu_machine_done;
5424 qemu_add_machine_init_done_notifier(&cpu->machine_done);
5425 }
5426 #endif
5427
5428 qemu_init_vcpu(cs);
5429
5430 /*
5431 * Most Intel and certain AMD CPUs support hyperthreading, but AMD CPU models
5432 * without the TOPOEXT feature cannot describe it properly. Even though QEMU
5433 * adjusts CPUID_0000_0001_EBX and CPUID_8000_0008_ECX based on the inputs
5434 * (sockets, cores, threads), it is still better to give users a warning.
5435 *
5436 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
5437 * cs->nr_threads hasn't been populated yet and the check is incorrect.
5438 */
5439 if (IS_AMD_CPU(env) &&
5440 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
5441 cs->nr_threads > 1 && !ht_warned) {
5442 warn_report("This family of AMD CPU doesn't support "
5443 "hyperthreading(%d)",
5444 cs->nr_threads);
5445 error_printf("Please configure -smp options properly"
5446 " or try enabling topoext feature.\n");
5447 ht_warned = true;
5448 }
5449
5450 x86_cpu_apic_realize(cpu, &local_err);
5451 if (local_err != NULL) {
5452 goto out;
5453 }
5454 cpu_reset(cs);
5455
5456 xcc->parent_realize(dev, &local_err);
5457
5458 out:
5459 if (local_err != NULL) {
5460 error_propagate(errp, local_err);
5461 return;
5462 }
5463 }
5464
5465 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
5466 {
5467 X86CPU *cpu = X86_CPU(dev);
5468 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5469 Error *local_err = NULL;
5470
5471 #ifndef CONFIG_USER_ONLY
5472 cpu_remove_sync(CPU(dev));
5473 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
5474 #endif
5475
5476 if (cpu->apic_state) {
5477 object_unparent(OBJECT(cpu->apic_state));
5478 cpu->apic_state = NULL;
5479 }
5480
5481 xcc->parent_unrealize(dev, &local_err);
5482 if (local_err != NULL) {
5483 error_propagate(errp, local_err);
5484 return;
5485 }
5486 }
5487
5488 typedef struct BitProperty {
5489 FeatureWord w;
5490 uint32_t mask;
5491 } BitProperty;
5492
5493 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5494 void *opaque, Error **errp)
5495 {
5496 X86CPU *cpu = X86_CPU(obj);
5497 BitProperty *fp = opaque;
5498 uint32_t f = cpu->env.features[fp->w];
5499 bool value = (f & fp->mask) == fp->mask;
5500 visit_type_bool(v, name, &value, errp);
5501 }
5502
5503 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5504 void *opaque, Error **errp)
5505 {
5506 DeviceState *dev = DEVICE(obj);
5507 X86CPU *cpu = X86_CPU(obj);
5508 BitProperty *fp = opaque;
5509 Error *local_err = NULL;
5510 bool value;
5511
5512 if (dev->realized) {
5513 qdev_prop_set_after_realize(dev, name, errp);
5514 return;
5515 }
5516
5517 visit_type_bool(v, name, &value, &local_err);
5518 if (local_err) {
5519 error_propagate(errp, local_err);
5520 return;
5521 }
5522
5523 if (value) {
5524 cpu->env.features[fp->w] |= fp->mask;
5525 } else {
5526 cpu->env.features[fp->w] &= ~fp->mask;
5527 }
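/* Mark the bit as explicitly set so x86_cpu_expand_features() won't auto-override it */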
5528 cpu->env.user_features[fp->w] |= fp->mask;
5529 }
5530
5531 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5532 void *opaque)
5533 {
5534 BitProperty *prop = opaque;
5535 g_free(prop);
5536 }
5537
5538 /* Register a boolean property to get/set a single bit in a uint32_t field.
5539 *
5540 * The same property name can be registered multiple times to make it affect
5541 * multiple bits in the same FeatureWord. In that case, the getter will return
5542 * true only if all bits are set.
5543 */
5544 static void x86_cpu_register_bit_prop(X86CPU *cpu,
5545 const char *prop_name,
5546 FeatureWord w,
5547 int bitnr)
5548 {
5549 BitProperty *fp;
5550 ObjectProperty *op;
5551 uint32_t mask = (1UL << bitnr);
5552
5553 op = object_property_find(OBJECT(cpu), prop_name, NULL);
5554 if (op) {
5555 fp = op->opaque;
5556 assert(fp->w == w);
5557 fp->mask |= mask;
5558 } else {
5559 fp = g_new0(BitProperty, 1);
5560 fp->w = w;
5561 fp->mask = mask;
5562 object_property_add(OBJECT(cpu), prop_name, "bool",
5563 x86_cpu_get_bit_prop,
5564 x86_cpu_set_bit_prop,
5565 x86_cpu_release_bit_prop, fp, &error_abort);
5566 }
5567 }
5568
5569 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5570 FeatureWord w,
5571 int bitnr)
5572 {
5573 FeatureWordInfo *fi = &feature_word_info[w];
5574 const char *name = fi->feat_names[bitnr];
5575
5576 if (!name) {
5577 return;
5578 }
5579
5580 /* Property names should use "-" instead of "_".
5581 * Old names containing underscores are registered as aliases
5582 * using object_property_add_alias()
5583 */
5584 assert(!strchr(name, '_'));
5585 /* aliases don't use "|" delimiters anymore; they are registered
5586 * manually using object_property_add_alias() */
5587 assert(!strchr(name, '|'));
5588 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5589 }
5590
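/*
 * Collect the Hyper-V crash MSR parameters, if the crash MSRs were exposed
 * to the guest (hv-crash), for use when reporting a guest panic.
 */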
5591 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5592 {
5593 X86CPU *cpu = X86_CPU(cs);
5594 CPUX86State *env = &cpu->env;
5595 GuestPanicInformation *panic_info = NULL;
5596
5597 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5598 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5599
5600 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
5601
5602 assert(HV_CRASH_PARAMS >= 5);
5603 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5604 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5605 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5606 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5607 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5608 }
5609
5610 return panic_info;
5611 }
5612 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5613 const char *name, void *opaque,
5614 Error **errp)
5615 {
5616 CPUState *cs = CPU(obj);
5617 GuestPanicInformation *panic_info;
5618
5619 if (!cs->crash_occurred) {
5620 error_setg(errp, "No crash occured");
5621 return;
5622 }
5623
5624 panic_info = x86_cpu_get_crash_info(cs);
5625 if (panic_info == NULL) {
5626 error_setg(errp, "No crash information");
5627 return;
5628 }
5629
5630 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5631 errp);
5632 qapi_free_GuestPanicInformation(panic_info);
5633 }
5634
5635 static void x86_cpu_initfn(Object *obj)
5636 {
5637 X86CPU *cpu = X86_CPU(obj);
5638 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
5639 CPUX86State *env = &cpu->env;
5640 FeatureWord w;
5641
5642 cpu_set_cpustate_pointers(cpu);
5643
5644 object_property_add(obj, "family", "int",
5645 x86_cpuid_version_get_family,
5646 x86_cpuid_version_set_family, NULL, NULL, NULL);
5647 object_property_add(obj, "model", "int",
5648 x86_cpuid_version_get_model,
5649 x86_cpuid_version_set_model, NULL, NULL, NULL);
5650 object_property_add(obj, "stepping", "int",
5651 x86_cpuid_version_get_stepping,
5652 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
5653 object_property_add_str(obj, "vendor",
5654 x86_cpuid_get_vendor,
5655 x86_cpuid_set_vendor, NULL);
5656 object_property_add_str(obj, "model-id",
5657 x86_cpuid_get_model_id,
5658 x86_cpuid_set_model_id, NULL);
5659 object_property_add(obj, "tsc-frequency", "int",
5660 x86_cpuid_get_tsc_freq,
5661 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
5662 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
5663 x86_cpu_get_feature_words,
5664 NULL, NULL, (void *)env->features, NULL);
5665 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
5666 x86_cpu_get_feature_words,
5667 NULL, NULL, (void *)cpu->filtered_features, NULL);
5668 /*
5669 * The "unavailable-features" property has the same semantics as
5670 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
5671 * QMP command: they list the features that would have prevented the
5672 * CPU from running if the "enforce" flag was set.
5673 */
5674 object_property_add(obj, "unavailable-features", "strList",
5675 x86_cpu_get_unavailable_features,
5676 NULL, NULL, NULL, &error_abort);
5677
5678 object_property_add(obj, "crash-information", "GuestPanicInformation",
5679 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
5680
5681 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
5682
5683 for (w = 0; w < FEATURE_WORDS; w++) {
5684 int bitnr;
5685
5686 for (bitnr = 0; bitnr < 32; bitnr++) {
5687 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
5688 }
5689 }
5690
5691 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
5692 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
5693 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
5694 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
5695 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
5696 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
5697 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
5698
5699 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
5700 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
5701 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
5702 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
5703 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
5704 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
5705 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
5706 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
5707 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
5708 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
5709 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
5710 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
5711 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
5712 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
5713 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
5714 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
5715 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
5716 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
5717 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
5718 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
5719 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
5720
5721 if (xcc->cpu_def) {
5722 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
5723 }
5724 }
5725
5726 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5727 {
5728 X86CPU *cpu = X86_CPU(cs);
5729
5730 return cpu->apic_id;
5731 }
5732
5733 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5734 {
5735 X86CPU *cpu = X86_CPU(cs);
5736
5737 return cpu->env.cr[0] & CR0_PG_MASK;
5738 }
5739
5740 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5741 {
5742 X86CPU *cpu = X86_CPU(cs);
5743
5744 cpu->env.eip = value;
5745 }
5746
5747 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5748 {
5749 X86CPU *cpu = X86_CPU(cs);
5750
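/* tb->pc holds cs_base + eip; subtract the CS base to recover the guest EIP */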
5751 cpu->env.eip = tb->pc - tb->cs_base;
5752 }
5753
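/*
 * Return the highest-priority pending interrupt that can be taken now, or 0.
 * Priority order: POLL, SIPI, then (with GIF set) SMI, NMI, MCE, external
 * interrupts (HARD), and finally virtual interrupts injected by SVM (VIRQ).
 */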
5754 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
5755 {
5756 X86CPU *cpu = X86_CPU(cs);
5757 CPUX86State *env = &cpu->env;
5758
5759 #if !defined(CONFIG_USER_ONLY)
5760 if (interrupt_request & CPU_INTERRUPT_POLL) {
5761 return CPU_INTERRUPT_POLL;
5762 }
5763 #endif
5764 if (interrupt_request & CPU_INTERRUPT_SIPI) {
5765 return CPU_INTERRUPT_SIPI;
5766 }
5767
5768 if (env->hflags2 & HF2_GIF_MASK) {
5769 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
5770 !(env->hflags & HF_SMM_MASK)) {
5771 return CPU_INTERRUPT_SMI;
5772 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
5773 !(env->hflags2 & HF2_NMI_MASK)) {
5774 return CPU_INTERRUPT_NMI;
5775 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
5776 return CPU_INTERRUPT_MCE;
5777 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
5778 (((env->hflags2 & HF2_VINTR_MASK) &&
5779 (env->hflags2 & HF2_HIF_MASK)) ||
5780 (!(env->hflags2 & HF2_VINTR_MASK) &&
5781 (env->eflags & IF_MASK &&
5782 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
5783 return CPU_INTERRUPT_HARD;
5784 #if !defined(CONFIG_USER_ONLY)
5785 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
5786 (env->eflags & IF_MASK) &&
5787 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
5788 return CPU_INTERRUPT_VIRQ;
5789 #endif
5790 }
5791 }
5792
5793 return 0;
5794 }
5795
5796 static bool x86_cpu_has_work(CPUState *cs)
5797 {
5798 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
5799 }
5800
5801 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5802 {
5803 X86CPU *cpu = X86_CPU(cs);
5804 CPUX86State *env = &cpu->env;
5805
5806 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5807 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5808 : bfd_mach_i386_i8086);
5809 info->print_insn = print_insn_i386;
5810
5811 info->cap_arch = CS_ARCH_X86;
5812 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5813 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5814 : CS_MODE_16);
5815 info->cap_insn_unit = 1;
5816 info->cap_insn_split = 8;
5817 }
5818
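/*
 * Recompute the cached env->hflags from the architectural state (segment
 * descriptors, CR0/CR4, EFER, EFLAGS), e.g. after registers have been
 * loaded from the accelerator or from a migration stream.
 */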
5819 void x86_update_hflags(CPUX86State *env)
5820 {
5821 uint32_t hflags;
5822 #define HFLAG_COPY_MASK \
5823 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
5824 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
5825 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
5826 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
5827
5828 hflags = env->hflags & HFLAG_COPY_MASK;
5829 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
5830 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
5831 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
5832 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
5833 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
5834
5835 if (env->cr[4] & CR4_OSFXSR_MASK) {
5836 hflags |= HF_OSFXSR_MASK;
5837 }
5838
5839 if (env->efer & MSR_EFER_LMA) {
5840 hflags |= HF_LMA_MASK;
5841 }
5842
5843 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
5844 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
5845 } else {
5846 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
5847 (DESC_B_SHIFT - HF_CS32_SHIFT);
5848 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
5849 (DESC_B_SHIFT - HF_SS32_SHIFT);
5850 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
5851 !(hflags & HF_CS32_MASK)) {
5852 hflags |= HF_ADDSEG_MASK;
5853 } else {
5854 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
5855 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
5856 }
5857 }
5858 env->hflags = hflags;
5859 }
5860
5861 static Property x86_cpu_properties[] = {
5862 #ifdef CONFIG_USER_ONLY
5863 /* apic_id = 0 by default for *-user, see commit 9886e834 */
5864 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
5865 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
5866 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
5867 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
5868 #else
5869 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
5870 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
5871 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
5872 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
5873 #endif
5874 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
5875 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
5876
5877 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
5878 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
5879 HYPERV_FEAT_RELAXED, 0),
5880 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
5881 HYPERV_FEAT_VAPIC, 0),
5882 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
5883 HYPERV_FEAT_TIME, 0),
5884 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
5885 HYPERV_FEAT_CRASH, 0),
5886 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
5887 HYPERV_FEAT_RESET, 0),
5888 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
5889 HYPERV_FEAT_VPINDEX, 0),
5890 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
5891 HYPERV_FEAT_RUNTIME, 0),
5892 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
5893 HYPERV_FEAT_SYNIC, 0),
5894 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
5895 HYPERV_FEAT_STIMER, 0),
5896 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
5897 HYPERV_FEAT_FREQUENCIES, 0),
5898 DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
5899 HYPERV_FEAT_REENLIGHTENMENT, 0),
5900 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
5901 HYPERV_FEAT_TLBFLUSH, 0),
5902 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
5903 HYPERV_FEAT_EVMCS, 0),
5904 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
5905 HYPERV_FEAT_IPI, 0),
5906 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
5907 HYPERV_FEAT_STIMER_DIRECT, 0),
5908 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
5909
5910 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
5911 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
5912 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
5913 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
5914 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
5915 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
5916 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
5917 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
5918 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
5919 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
5920 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
5921 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
5922 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
5923 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
5924 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
5925 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
5926 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
5927 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
5928 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
5929 false),
5930 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
5931 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
5932 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
5933 true),
5934 /*
5935 * legacy_cache defaults to true unless the CPU model provides its
5936 * own cache information (see x86_cpu_load_def()).
5937 */
5938 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
5939
5940 /*
5941 * From "Requirements for Implementing the Microsoft
5942 * Hypervisor Interface":
5943 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
5944 *
5945 * "Starting with Windows Server 2012 and Windows 8, if
5946 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
5947 * the hypervisor imposes no specific limit to the number of VPs.
5948 * In this case, Windows Server 2012 guest VMs may use more than
5949 * 64 VPs, up to the maximum supported number of processors applicable
5950 * to the specific Windows version being used."
5951 */
5952 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
5953 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
5954 false),
5955 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
5956 true),
5957 DEFINE_PROP_END_OF_LIST()
5958 };
5959
5960 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
5961 {
5962 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5963 CPUClass *cc = CPU_CLASS(oc);
5964 DeviceClass *dc = DEVICE_CLASS(oc);
5965
5966 device_class_set_parent_realize(dc, x86_cpu_realizefn,
5967 &xcc->parent_realize);
5968 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
5969 &xcc->parent_unrealize);
5970 dc->props = x86_cpu_properties;
5971
5972 xcc->parent_reset = cc->reset;
5973 cc->reset = x86_cpu_reset;
5974 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
5975
5976 cc->class_by_name = x86_cpu_class_by_name;
5977 cc->parse_features = x86_cpu_parse_featurestr;
5978 cc->has_work = x86_cpu_has_work;
5979 #ifdef CONFIG_TCG
5980 cc->do_interrupt = x86_cpu_do_interrupt;
5981 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
5982 #endif
5983 cc->dump_state = x86_cpu_dump_state;
5984 cc->get_crash_info = x86_cpu_get_crash_info;
5985 cc->set_pc = x86_cpu_set_pc;
5986 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
5987 cc->gdb_read_register = x86_cpu_gdb_read_register;
5988 cc->gdb_write_register = x86_cpu_gdb_write_register;
5989 cc->get_arch_id = x86_cpu_get_arch_id;
5990 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
5991 #ifndef CONFIG_USER_ONLY
5992 cc->asidx_from_attrs = x86_asidx_from_attrs;
5993 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
5994 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
5995 cc->write_elf64_note = x86_cpu_write_elf64_note;
5996 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
5997 cc->write_elf32_note = x86_cpu_write_elf32_note;
5998 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
5999 cc->vmsd = &vmstate_x86_cpu;
6000 #endif
6001 cc->gdb_arch_name = x86_gdb_arch_name;
6002 #ifdef TARGET_X86_64
6003 cc->gdb_core_xml_file = "i386-64bit.xml";
6004 cc->gdb_num_core_regs = 66;
6005 #else
6006 cc->gdb_core_xml_file = "i386-32bit.xml";
6007 cc->gdb_num_core_regs = 50;
6008 #endif
6009 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
6010 cc->debug_excp_handler = breakpoint_handler;
6011 #endif
6012 cc->cpu_exec_enter = x86_cpu_exec_enter;
6013 cc->cpu_exec_exit = x86_cpu_exec_exit;
6014 #ifdef CONFIG_TCG
6015 cc->tcg_initialize = tcg_x86_init;
6016 cc->tlb_fill = x86_cpu_tlb_fill;
6017 #endif
6018 cc->disas_set_info = x86_disas_set_info;
6019
6020 dc->user_creatable = true;
6021 }
6022
6023 static const TypeInfo x86_cpu_type_info = {
6024 .name = TYPE_X86_CPU,
6025 .parent = TYPE_CPU,
6026 .instance_size = sizeof(X86CPU),
6027 .instance_init = x86_cpu_initfn,
6028 .abstract = true,
6029 .class_size = sizeof(X86CPUClass),
6030 .class_init = x86_cpu_common_class_init,
6031 };
6032
6033
6034 /* "base" CPU model, used by query-cpu-model-expansion */
6035 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
6036 {
6037 X86CPUClass *xcc = X86_CPU_CLASS(oc);
6038
6039 xcc->static_model = true;
6040 xcc->migration_safe = true;
6041 xcc->model_description = "base CPU model type with no features enabled";
6042 xcc->ordering = 8;
6043 }
6044
6045 static const TypeInfo x86_base_cpu_type_info = {
6046 .name = X86_CPU_TYPE_NAME("base"),
6047 .parent = TYPE_X86_CPU,
6048 .class_init = x86_cpu_base_class_init,
6049 };
6050
6051 static void x86_cpu_register_types(void)
6052 {
6053 int i;
6054
6055 type_register_static(&x86_cpu_type_info);
6056 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
6057 x86_register_cpudef_type(&builtin_x86_defs[i]);
6058 }
6059 type_register_static(&max_x86_cpu_type_info);
6060 type_register_static(&x86_base_cpu_type_info);
6061 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
6062 type_register_static(&host_x86_cpu_type_info);
6063 #endif
6064 }
6065
6066 type_init(x86_cpu_register_types)