]> git.proxmox.com Git - mirror_qemu.git/blob - target/i386/cpu.c
i386: display known CPUID features linewrapped, in alphabetical order
[mirror_qemu.git] / target / i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22
23 #include "cpu.h"
24 #include "exec/exec-all.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/hvf.h"
27 #include "sysemu/cpus.h"
28 #include "kvm_i386.h"
29 #include "sev_i386.h"
30
31 #include "qemu/error-report.h"
32 #include "qemu/option.h"
33 #include "qemu/config-file.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-visit-misc.h"
36 #include "qapi/qapi-visit-run-state.h"
37 #include "qapi/qmp/qdict.h"
38 #include "qapi/qmp/qerror.h"
39 #include "qapi/visitor.h"
40 #include "qom/qom-qobject.h"
41 #include "sysemu/arch_init.h"
42
43 #include "standard-headers/asm-x86/kvm_para.h"
44
45 #include "sysemu/sysemu.h"
46 #include "hw/qdev-properties.h"
47 #include "hw/i386/topology.h"
48 #ifndef CONFIG_USER_ONLY
49 #include "exec/address-spaces.h"
50 #include "hw/hw.h"
51 #include "hw/xen/xen.h"
52 #include "hw/i386/apic_internal.h"
53 #endif
54
55 #include "disas/capstone.h"
56
57 /* Helpers for building CPUID[2] descriptors: */
58
/*
 * Key parameters of one CPUID[2] cache descriptor, used to match a
 * CPUCacheInfo configuration against the known one-byte descriptors.
 */
struct CPUID2CacheDescriptorInfo {
    enum CacheType type;    /* data, instruction, or unified cache */
    int level;              /* cache level: 1, 2, or 3 */
    int size;               /* total cache size, in bytes */
    int line_size;          /* cache line size, in bytes */
    int associativity;      /* number of ways */
};

/* Byte-size multipliers used when spelling out cache sizes below */
#define KiB 1024
#define MiB (1024 * 1024)
69
/*
 * Known CPUID 2 cache descriptors.
 * From Intel SDM Volume 2A, CPUID instruction.
 * The array index is the descriptor byte value itself; unlisted
 * entries are zero-initialized and never match any real cache.
 */
struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
    [0x06] = { .level = 1, .type = ICACHE, .size = 8 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x08] = { .level = 1, .type = ICACHE, .size = 16 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x09] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x0A] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
               .associativity = 2,  .line_size = 32, },
    [0x0C] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x0D] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x0E] = { .level = 1, .type = DCACHE, .size = 24 * KiB,
               .associativity = 6,  .line_size = 64, },
    [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 2,  .line_size = 64, },
    [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8,  .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x22, 0x23 are not included
     */
    [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 16, .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x25, 0x20 are not included
     */
    [0x2C] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x30] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4,  .line_size = 32, },
    [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4,  .line_size = 32, },
    [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4,  .line_size = 32, },
    [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    /* Descriptor 0x49 depends on CPU family/model, so it is not included */
    [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 12, .line_size = 64, },
    [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
               .associativity = 16, .line_size = 64, },
    [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 24, .line_size = 64, },
    [0x60] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x66] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x67] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x68] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4,  .line_size = 64, },
    /* lines per sector is not supported by cpuid2_cache_descriptor(),
     * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
     */
    [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 2,  .line_size = 64, },
    [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8,  .line_size = 64, },
    [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
               .associativity = 8,  .line_size = 32, },
    [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 8,  .line_size = 32, },
    [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8,  .line_size = 32, },
    [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8,  .line_size = 32, },
    [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
               .associativity = 4,  .line_size = 64, },
    [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 4,  .line_size = 64, },
    [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 8,  .line_size = 64, },
    [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 8,  .line_size = 64, },
    /* 0xDC is 1.5 MiB; the double multiply yields an exact integer */
    [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
               .associativity = 12, .line_size = 64, },
    [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
               .associativity = 16, .line_size = 64, },
    [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
               .associativity = 24, .line_size = 64, },
    [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
               .associativity = 24, .line_size = 64, },
};
194
195 /*
196 * "CPUID leaf 2 does not report cache descriptor information,
197 * use CPUID leaf 4 to query cache parameters"
198 */
199 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
200
201 /*
202 * Return a CPUID 2 cache descriptor for a given cache.
203 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
204 */
205 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
206 {
207 int i;
208
209 assert(cache->size > 0);
210 assert(cache->level > 0);
211 assert(cache->line_size > 0);
212 assert(cache->associativity > 0);
213 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
214 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
215 if (d->level == cache->level && d->type == cache->type &&
216 d->size == cache->size && d->line_size == cache->line_size &&
217 d->associativity == cache->associativity) {
218 return i;
219 }
220 }
221
222 return CACHE_DESCRIPTOR_UNAVAILABLE;
223 }
224
/* CPUID Leaf 4 constants: */

/* EAX: cache type field, bits 4:0 */
#define CACHE_TYPE_D    1
#define CACHE_TYPE_I    2
#define CACHE_TYPE_UNIFIED   3

/*
 * Cache level goes in EAX bits 7:5.  The argument must be parenthesized
 * so that expression arguments (e.g. CACHE_LEVEL(l + 1)) expand correctly.
 */
#define CACHE_LEVEL(l)        ((l) << 5)

/* EAX bit 8: cache is self-initializing */
#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DCACHE) ? CACHE_TYPE_D : \
                       ((t) == ICACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)
246
247
/* Encode cache info for CPUID[4]
 *
 * @cache:        cache parameters to encode; geometry must be consistent
 *                (size == line_size * associativity * partitions * sets)
 * @num_apic_ids: number of logical processors sharing this cache
 * @num_cores:    number of cores on the physical package
 * @eax/@ebx/@ecx/@edx: output registers, per the Intel SDM leaf-4 layout
 */
static void encode_cache_cpuid4(CPUCacheInfo *cache,
                                int num_apic_ids, int num_cores,
                                uint32_t *eax, uint32_t *ebx,
                                uint32_t *ecx, uint32_t *edx)
{
    /* Geometry must be self-consistent before it can be encoded */
    assert(cache->size == cache->line_size * cache->associativity *
                          cache->partitions * cache->sets);

    assert(num_apic_ids > 0);
    /* EAX: type (4:0), level (7:5), self-init (8),
     * sharing APIC IDs - 1 (25:14), cores per package - 1 (31:26) */
    *eax = CACHE_TYPE(cache->type) |
           CACHE_LEVEL(cache->level) |
           (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
           ((num_cores - 1) << 26) |
           ((num_apic_ids - 1) << 14);

    assert(cache->line_size > 0);
    assert(cache->partitions > 0);
    assert(cache->associativity > 0);
    /* We don't implement fully-associative caches */
    assert(cache->associativity < cache->sets);
    /* EBX: line size - 1 (11:0), partitions - 1 (21:12), ways - 1 (31:22) */
    *ebx = (cache->line_size - 1) |
           ((cache->partitions - 1) << 12) |
           ((cache->associativity - 1) << 22);

    assert(cache->sets > 0);
    /* ECX: number of sets - 1 */
    *ecx = cache->sets - 1;

    /* EDX: WBINVD behavior, inclusivity, and complex indexing flags */
    *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
           (cache->inclusive ? CACHE_INCLUSIVE : 0) |
           (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
}
280
281 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
282 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
283 {
284 assert(cache->size % 1024 == 0);
285 assert(cache->lines_per_tag > 0);
286 assert(cache->associativity > 0);
287 assert(cache->line_size > 0);
288 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
289 (cache->lines_per_tag << 8) | (cache->line_size);
290 }
291
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * the argument is parenthesized so that expression arguments
 * (e.g. AMD_ENC_ASSOC(x + y)) are evaluated before comparison.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a)   : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
307
308 /*
309 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
310 * @l3 can be NULL.
311 */
312 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
313 CPUCacheInfo *l3,
314 uint32_t *ecx, uint32_t *edx)
315 {
316 assert(l2->size % 1024 == 0);
317 assert(l2->associativity > 0);
318 assert(l2->lines_per_tag > 0);
319 assert(l2->line_size > 0);
320 *ecx = ((l2->size / 1024) << 16) |
321 (AMD_ENC_ASSOC(l2->associativity) << 12) |
322 (l2->lines_per_tag << 8) | (l2->line_size);
323
324 if (l3) {
325 assert(l3->size % (512 * 1024) == 0);
326 assert(l3->associativity > 0);
327 assert(l3->lines_per_tag > 0);
328 assert(l3->line_size > 0);
329 *edx = ((l3->size / (512 * 1024)) << 18) |
330 (AMD_ENC_ASSOC(l3->associativity) << 12) |
331 (l3->lines_per_tag << 8) | (l3->line_size);
332 } else {
333 *edx = 0;
334 }
335 }
336
/*
 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E
 * Please refer to the AMD64 Architecture Programmer's Manual Volume 3.
 * Define the constants to build the cpu topology. Right now, TOPOEXT
 * feature is enabled only on EPYC. So, these constants are based on
 * EPYC supported configurations. We may need to handle the cases if
 * these values change in future.
 */
/* Maximum core complexes in a node */
#define MAX_CCX 2
/* Maximum cores in a core complex */
#define MAX_CORES_IN_CCX 4
/* Maximum cores in a node */
#define MAX_CORES_IN_NODE 8
/* Maximum nodes in a socket */
#define MAX_NODES_PER_SOCKET 4

/*
 * Figure out the number of nodes required to build this config.
 * A node holds at most MAX_CORES_IN_NODE (8) cores.
 */
static int nodes_in_socket(int nr_cores)
{
    /* Round up: a partially-filled node still counts as a node */
    int nodes = (nr_cores + MAX_CORES_IN_NODE - 1) / MAX_CORES_IN_NODE;

    /* Hardware does not support config with 3 nodes, return 4 in that case */
    return (nodes == 3) ? 4 : nodes;
}

/*
 * Decide the number of cores in a core complex with the given nr_cores using
 * following set constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
 * MAX_NODES_PER_SOCKET. Maintain symmetry as much as possible.
 * L3 cache is shared across all cores in a core complex. So, this will also
 * tell us how many cores are sharing the L3 cache.
 */
static int cores_in_core_complex(int nr_cores)
{
    int nodes;

    /* Everything fits inside a single core complex? */
    if (nr_cores <= MAX_CORES_IN_CCX) {
        return nr_cores;
    }

    /* Get the number of nodes required to build this config */
    nodes = nodes_in_socket(nr_cores);

    /* Spread the cores evenly across all core complexes, rounding up */
    return (nr_cores + nodes * MAX_CCX - 1) / (nodes * MAX_CCX);
}
392
393 /* Encode cache info for CPUID[8000001D] */
394 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
395 uint32_t *eax, uint32_t *ebx,
396 uint32_t *ecx, uint32_t *edx)
397 {
398 uint32_t l3_cores;
399 assert(cache->size == cache->line_size * cache->associativity *
400 cache->partitions * cache->sets);
401
402 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
403 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
404
405 /* L3 is shared among multiple cores */
406 if (cache->level == 3) {
407 l3_cores = cores_in_core_complex(cs->nr_cores);
408 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
409 } else {
410 *eax |= ((cs->nr_threads - 1) << 14);
411 }
412
413 assert(cache->line_size > 0);
414 assert(cache->partitions > 0);
415 assert(cache->associativity > 0);
416 /* We don't implement fully-associative caches */
417 assert(cache->associativity < cache->sets);
418 *ebx = (cache->line_size - 1) |
419 ((cache->partitions - 1) << 12) |
420 ((cache->associativity - 1) << 22);
421
422 assert(cache->sets > 0);
423 *ecx = cache->sets - 1;
424
425 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
426 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
427 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
428 }
429
/* Data structure to hold the configuration info for a given core index */
struct core_topology {
    /* core complex id of the current core index */
    int ccx_id;
    /*
     * Adjusted core index for this core in the topology.
     * This can be 0,1,2,3 with max 4 (MAX_CORES_IN_CCX) cores
     * in a core complex.
     */
    int core_id;
    /* Node id for this core index */
    int node_id;
    /* Number of nodes in this config */
    int num_nodes;
};
444
445 /*
446 * Build the configuration closely match the EPYC hardware. Using the EPYC
447 * hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE)
448 * right now. This could change in future.
449 * nr_cores : Total number of cores in the config
450 * core_id : Core index of the current CPU
451 * topo : Data structure to hold all the config info for this core index
452 */
453 static void build_core_topology(int nr_cores, int core_id,
454 struct core_topology *topo)
455 {
456 int nodes, cores_in_ccx;
457
458 /* First get the number of nodes required */
459 nodes = nodes_in_socket(nr_cores);
460
461 cores_in_ccx = cores_in_core_complex(nr_cores);
462
463 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
464 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
465 topo->core_id = core_id % cores_in_ccx;
466 topo->num_nodes = nodes;
467 }
468
/* Encode topology info for CPUID[8000001E] (AMD processor topology leaf) */
static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
                                      uint32_t *eax, uint32_t *ebx,
                                      uint32_t *ecx, uint32_t *edx)
{
    struct core_topology topo = {0};

    build_core_topology(cs->nr_cores, cpu->core_id, &topo);
    /* EAX: extended APIC ID of this logical processor */
    *eax = cpu->apic_id;
    /*
     * CPUID_Fn8000001E_EBX
     * 31:16 Reserved
     * 15:8  Threads per core (The number of threads per core is
     *       Threads per core + 1)
     *  7:0  Core id (see bit decoding below)
     *       SMT:
     *           4:3 node id
     *             2 Core complex id
     *           1:0 Core id
     *       Non SMT:
     *           5:4 node id
     *             3 Core complex id
     *           1:0 Core id
     */
    if (cs->nr_threads - 1) {
        /* SMT enabled: core id field packs node/ccx/core more tightly */
        *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
               (topo.ccx_id << 2) | topo.core_id;
    } else {
        *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
    }
    /*
     * CPUID_Fn8000001E_ECX
     * 31:11 Reserved
     * 10:8  Nodes per processor (Nodes per processor is number of nodes + 1)
     *  7:0  Node id (see bit decoding below)
     *         2  Socket id
     *       1:0  Node id
     */
    *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) | topo.node_id;
    *edx = 0;
}
510
/*
 * Definitions of the hardcoded cache entries we expose:
 * These are legacy cache values. If there is a need to change any
 * of these values please use builtin_x86_defs
 */

/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DCACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DCACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = ICACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = ICACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};

/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,
    .partitions = 1,
    .no_invd_sharing = true,
};

/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
/* Note: no .sets/.partitions here — only used for the leaf-2 descriptor */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};


/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,
    .partitions = 1,
};

/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};
620
/* TLB definitions: hardcoded values reported in CPUID[0x80000005]
 * and CPUID[0x80000006].  An associativity of 0 marks the TLB as
 * disabled/not present in the corresponding leaf. */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
642
/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
/* generated packets which contain IP payloads have LIP values;
 * use an unsigned shift: (1 << 31) would shift into the sign bit of a
 * signed int, which is undefined behavior in C */
#define INTEL_PT_IP_LIP          (1u << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
671
672 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
673 uint32_t vendor2, uint32_t vendor3)
674 {
675 int i;
676 for (i = 0; i < 4; i++) {
677 dst[i] = vendor1 >> (8 * i);
678 dst[i + 4] = vendor2 >> (8 * i);
679 dst[i + 8] = vendor3 >> (8 * i);
680 }
681 dst[CPUID_VENDOR_SZ] = '\0';
682 }
683
/* Baseline CPUID[1].EDX feature sets for the classic CPU models */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* CPUID[1].EDX features the TCG emulator can actually provide */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE
          (NOTE(review): CPUID_SS appears both here and in TCG_FEATURES
          above — confirm which list is authoritative) */
/* CPUID[1].ECX features supported under TCG */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

/* CPUID[0x80000001].EDX: the AMD-aliased subset of TCG_FEATURES plus
 * the AMD-specific bits TCG implements */
#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
749
/* Describes how one CPUID feature word is queried and which of its
 * bits QEMU knows about. */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     * A NULL entry means the bit is unnamed/unsupported here.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
} FeatureWordInfo;
767
768 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
769 [FEAT_1_EDX] = {
770 .feat_names = {
771 "fpu", "vme", "de", "pse",
772 "tsc", "msr", "pae", "mce",
773 "cx8", "apic", NULL, "sep",
774 "mtrr", "pge", "mca", "cmov",
775 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
776 NULL, "ds" /* Intel dts */, "acpi", "mmx",
777 "fxsr", "sse", "sse2", "ss",
778 "ht" /* Intel htt */, "tm", "ia64", "pbe",
779 },
780 .cpuid_eax = 1, .cpuid_reg = R_EDX,
781 .tcg_features = TCG_FEATURES,
782 },
783 [FEAT_1_ECX] = {
784 .feat_names = {
785 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
786 "ds-cpl", "vmx", "smx", "est",
787 "tm2", "ssse3", "cid", NULL,
788 "fma", "cx16", "xtpr", "pdcm",
789 NULL, "pcid", "dca", "sse4.1",
790 "sse4.2", "x2apic", "movbe", "popcnt",
791 "tsc-deadline", "aes", "xsave", "osxsave",
792 "avx", "f16c", "rdrand", "hypervisor",
793 },
794 .cpuid_eax = 1, .cpuid_reg = R_ECX,
795 .tcg_features = TCG_EXT_FEATURES,
796 },
797 /* Feature names that are already defined on feature_name[] but
798 * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
799 * names on feat_names below. They are copied automatically
800 * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
801 */
802 [FEAT_8000_0001_EDX] = {
803 .feat_names = {
804 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
805 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
806 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
807 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
808 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
809 "nx", NULL, "mmxext", NULL /* mmx */,
810 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
811 NULL, "lm", "3dnowext", "3dnow",
812 },
813 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
814 .tcg_features = TCG_EXT2_FEATURES,
815 },
816 [FEAT_8000_0001_ECX] = {
817 .feat_names = {
818 "lahf-lm", "cmp-legacy", "svm", "extapic",
819 "cr8legacy", "abm", "sse4a", "misalignsse",
820 "3dnowprefetch", "osvw", "ibs", "xop",
821 "skinit", "wdt", NULL, "lwp",
822 "fma4", "tce", NULL, "nodeid-msr",
823 NULL, "tbm", "topoext", "perfctr-core",
824 "perfctr-nb", NULL, NULL, NULL,
825 NULL, NULL, NULL, NULL,
826 },
827 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
828 .tcg_features = TCG_EXT3_FEATURES,
829 },
830 [FEAT_C000_0001_EDX] = {
831 .feat_names = {
832 NULL, NULL, "xstore", "xstore-en",
833 NULL, NULL, "xcrypt", "xcrypt-en",
834 "ace2", "ace2-en", "phe", "phe-en",
835 "pmm", "pmm-en", NULL, NULL,
836 NULL, NULL, NULL, NULL,
837 NULL, NULL, NULL, NULL,
838 NULL, NULL, NULL, NULL,
839 NULL, NULL, NULL, NULL,
840 },
841 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
842 .tcg_features = TCG_EXT4_FEATURES,
843 },
844 [FEAT_KVM] = {
845 .feat_names = {
846 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
847 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
848 NULL, "kvm-pv-tlb-flush", NULL, NULL,
849 NULL, NULL, NULL, NULL,
850 NULL, NULL, NULL, NULL,
851 NULL, NULL, NULL, NULL,
852 "kvmclock-stable-bit", NULL, NULL, NULL,
853 NULL, NULL, NULL, NULL,
854 },
855 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
856 .tcg_features = TCG_KVM_FEATURES,
857 },
858 [FEAT_KVM_HINTS] = {
859 .feat_names = {
860 "kvm-hint-dedicated", NULL, NULL, NULL,
861 NULL, NULL, NULL, NULL,
862 NULL, NULL, NULL, NULL,
863 NULL, NULL, NULL, NULL,
864 NULL, NULL, NULL, NULL,
865 NULL, NULL, NULL, NULL,
866 NULL, NULL, NULL, NULL,
867 NULL, NULL, NULL, NULL,
868 },
869 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
870 .tcg_features = TCG_KVM_FEATURES,
871 /*
872 * KVM hints aren't auto-enabled by -cpu host, they need to be
873 * explicitly enabled in the command-line.
874 */
875 .no_autoenable_flags = ~0U,
876 },
877 [FEAT_HYPERV_EAX] = {
878 .feat_names = {
879 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
880 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
881 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
882 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
883 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
884 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
885 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
886 NULL, NULL,
887 NULL, NULL, NULL, NULL,
888 NULL, NULL, NULL, NULL,
889 NULL, NULL, NULL, NULL,
890 NULL, NULL, NULL, NULL,
891 },
892 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
893 },
894 [FEAT_HYPERV_EBX] = {
895 .feat_names = {
896 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
897 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
898 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
899 NULL /* hv_create_port */, NULL /* hv_connect_port */,
900 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
901 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
902 NULL, NULL,
903 NULL, NULL, NULL, NULL,
904 NULL, NULL, NULL, NULL,
905 NULL, NULL, NULL, NULL,
906 NULL, NULL, NULL, NULL,
907 },
908 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
909 },
910 [FEAT_HYPERV_EDX] = {
911 .feat_names = {
912 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
913 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
914 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
915 NULL, NULL,
916 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
917 NULL, NULL, NULL, NULL,
918 NULL, NULL, NULL, NULL,
919 NULL, NULL, NULL, NULL,
920 NULL, NULL, NULL, NULL,
921 NULL, NULL, NULL, NULL,
922 },
923 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
924 },
925 [FEAT_SVM] = {
926 .feat_names = {
927 "npt", "lbrv", "svm-lock", "nrip-save",
928 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
929 NULL, NULL, "pause-filter", NULL,
930 "pfthreshold", NULL, NULL, NULL,
931 NULL, NULL, NULL, NULL,
932 NULL, NULL, NULL, NULL,
933 NULL, NULL, NULL, NULL,
934 NULL, NULL, NULL, NULL,
935 },
936 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
937 .tcg_features = TCG_SVM_FEATURES,
938 },
939 [FEAT_7_0_EBX] = {
940 .feat_names = {
941 "fsgsbase", "tsc-adjust", NULL, "bmi1",
942 "hle", "avx2", NULL, "smep",
943 "bmi2", "erms", "invpcid", "rtm",
944 NULL, NULL, "mpx", NULL,
945 "avx512f", "avx512dq", "rdseed", "adx",
946 "smap", "avx512ifma", "pcommit", "clflushopt",
947 "clwb", "intel-pt", "avx512pf", "avx512er",
948 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
949 },
950 .cpuid_eax = 7,
951 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
952 .cpuid_reg = R_EBX,
953 .tcg_features = TCG_7_0_EBX_FEATURES,
954 },
955 [FEAT_7_0_ECX] = {
956 .feat_names = {
957 NULL, "avx512vbmi", "umip", "pku",
958 "ospke", NULL, "avx512vbmi2", NULL,
959 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
960 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
961 "la57", NULL, NULL, NULL,
962 NULL, NULL, "rdpid", NULL,
963 NULL, "cldemote", NULL, NULL,
964 NULL, NULL, NULL, NULL,
965 },
966 .cpuid_eax = 7,
967 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
968 .cpuid_reg = R_ECX,
969 .tcg_features = TCG_7_0_ECX_FEATURES,
970 },
971 [FEAT_7_0_EDX] = {
972 .feat_names = {
973 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
974 NULL, NULL, NULL, NULL,
975 NULL, NULL, NULL, NULL,
976 NULL, NULL, NULL, NULL,
977 NULL, NULL, NULL, NULL,
978 NULL, NULL, NULL, NULL,
979 NULL, NULL, "spec-ctrl", NULL,
980 NULL, NULL, NULL, "ssbd",
981 },
982 .cpuid_eax = 7,
983 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
984 .cpuid_reg = R_EDX,
985 .tcg_features = TCG_7_0_EDX_FEATURES,
986 },
987 [FEAT_8000_0007_EDX] = {
988 .feat_names = {
989 NULL, NULL, NULL, NULL,
990 NULL, NULL, NULL, NULL,
991 "invtsc", NULL, NULL, NULL,
992 NULL, NULL, NULL, NULL,
993 NULL, NULL, NULL, NULL,
994 NULL, NULL, NULL, NULL,
995 NULL, NULL, NULL, NULL,
996 NULL, NULL, NULL, NULL,
997 },
998 .cpuid_eax = 0x80000007,
999 .cpuid_reg = R_EDX,
1000 .tcg_features = TCG_APM_FEATURES,
1001 .unmigratable_flags = CPUID_APM_INVTSC,
1002 },
1003 [FEAT_8000_0008_EBX] = {
1004 .feat_names = {
1005 NULL, NULL, NULL, NULL,
1006 NULL, NULL, NULL, NULL,
1007 NULL, NULL, NULL, NULL,
1008 "ibpb", NULL, NULL, NULL,
1009 NULL, NULL, NULL, NULL,
1010 NULL, NULL, NULL, NULL,
1011 NULL, "virt-ssbd", NULL, NULL,
1012 NULL, NULL, NULL, NULL,
1013 },
1014 .cpuid_eax = 0x80000008,
1015 .cpuid_reg = R_EBX,
1016 .tcg_features = 0,
1017 .unmigratable_flags = 0,
1018 },
1019 [FEAT_XSAVE] = {
1020 .feat_names = {
1021 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1022 NULL, NULL, NULL, NULL,
1023 NULL, NULL, NULL, NULL,
1024 NULL, NULL, NULL, NULL,
1025 NULL, NULL, NULL, NULL,
1026 NULL, NULL, NULL, NULL,
1027 NULL, NULL, NULL, NULL,
1028 NULL, NULL, NULL, NULL,
1029 },
1030 .cpuid_eax = 0xd,
1031 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
1032 .cpuid_reg = R_EAX,
1033 .tcg_features = TCG_XSAVE_FEATURES,
1034 },
1035 [FEAT_6_EAX] = {
1036 .feat_names = {
1037 NULL, NULL, "arat", NULL,
1038 NULL, NULL, NULL, NULL,
1039 NULL, NULL, NULL, NULL,
1040 NULL, NULL, NULL, NULL,
1041 NULL, NULL, NULL, NULL,
1042 NULL, NULL, NULL, NULL,
1043 NULL, NULL, NULL, NULL,
1044 NULL, NULL, NULL, NULL,
1045 },
1046 .cpuid_eax = 6, .cpuid_reg = R_EAX,
1047 .tcg_features = TCG_6_EAX_FEATURES,
1048 },
1049 [FEAT_XSAVE_COMP_LO] = {
1050 .cpuid_eax = 0xD,
1051 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1052 .cpuid_reg = R_EAX,
1053 .tcg_features = ~0U,
1054 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1055 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1056 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1057 XSTATE_PKRU_MASK,
1058 },
1059 [FEAT_XSAVE_COMP_HI] = {
1060 .cpuid_eax = 0xD,
1061 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1062 .cpuid_reg = R_EDX,
1063 .tcg_features = ~0U,
1064 },
1065 };
1066
/* Per-register metadata for the 32-bit x86 GPRs (see x86_reg_info_32) */
typedef struct X86RegisterInfo32 {
    /* Name of register (e.g. "EAX") */
    const char *name;
    /* Corresponding X86CPURegister32 QAPI enum value */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
1073
/*
 * Table mapping each 32-bit GPR index (R_EAX, R_ECX, ...) to its name and
 * QAPI enum value.  REGISTER() keeps the three spellings of each register
 * (index constant, name string, QAPI constant) in sync via token pasting.
 */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
1087
/* Describes one XSAVE state component (indexed by its XSTATE_*_BIT) */
typedef struct ExtSaveArea {
    /* feature word and bit(s) within it that enable this component */
    uint32_t feature, bits;
    /* byte offset and size of the component within the XSAVE area */
    uint32_t offset, size;
} ExtSaveArea;
1092
1093 static const ExtSaveArea x86_ext_save_areas[] = {
1094 [XSTATE_FP_BIT] = {
1095 /* x87 FP state component is always enabled if XSAVE is supported */
1096 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1097 /* x87 state is in the legacy region of the XSAVE area */
1098 .offset = 0,
1099 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1100 },
1101 [XSTATE_SSE_BIT] = {
1102 /* SSE state component is always enabled if XSAVE is supported */
1103 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1104 /* SSE state is in the legacy region of the XSAVE area */
1105 .offset = 0,
1106 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1107 },
1108 [XSTATE_YMM_BIT] =
1109 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1110 .offset = offsetof(X86XSaveArea, avx_state),
1111 .size = sizeof(XSaveAVX) },
1112 [XSTATE_BNDREGS_BIT] =
1113 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1114 .offset = offsetof(X86XSaveArea, bndreg_state),
1115 .size = sizeof(XSaveBNDREG) },
1116 [XSTATE_BNDCSR_BIT] =
1117 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1118 .offset = offsetof(X86XSaveArea, bndcsr_state),
1119 .size = sizeof(XSaveBNDCSR) },
1120 [XSTATE_OPMASK_BIT] =
1121 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1122 .offset = offsetof(X86XSaveArea, opmask_state),
1123 .size = sizeof(XSaveOpmask) },
1124 [XSTATE_ZMM_Hi256_BIT] =
1125 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1126 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1127 .size = sizeof(XSaveZMM_Hi256) },
1128 [XSTATE_Hi16_ZMM_BIT] =
1129 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1130 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1131 .size = sizeof(XSaveHi16_ZMM) },
1132 [XSTATE_PKRU_BIT] =
1133 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1134 .offset = offsetof(X86XSaveArea, pkru_state),
1135 .size = sizeof(XSavePKRU) },
1136 };
1137
1138 static uint32_t xsave_area_size(uint64_t mask)
1139 {
1140 int i;
1141 uint64_t ret = 0;
1142
1143 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1144 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1145 if ((mask >> i) & 1) {
1146 ret = MAX(ret, esa->offset + esa->size);
1147 }
1148 }
1149 return ret;
1150 }
1151
/*
 * True when the active accelerator is one whose guest CPUID is derived
 * from the host CPU (KVM or HVF).
 */
static inline bool accel_uses_host_cpuid(void)
{
    if (kvm_enabled()) {
        return true;
    }
    return hvf_enabled();
}
1156
1157 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1158 {
1159 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1160 cpu->env.features[FEAT_XSAVE_COMP_LO];
1161 }
1162
1163 const char *get_register_name_32(unsigned int reg)
1164 {
1165 if (reg >= CPU_NB_REGS32) {
1166 return NULL;
1167 }
1168 return x86_reg_info_32[reg].name;
1169 }
1170
1171 /*
1172 * Returns the set of feature flags that are supported and migratable by
1173 * QEMU, for a given FeatureWord.
1174 */
1175 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1176 {
1177 FeatureWordInfo *wi = &feature_word_info[w];
1178 uint32_t r = 0;
1179 int i;
1180
1181 for (i = 0; i < 32; i++) {
1182 uint32_t f = 1U << i;
1183
1184 /* If the feature name is known, it is implicitly considered migratable,
1185 * unless it is explicitly set in unmigratable_flags */
1186 if ((wi->migratable_flags & f) ||
1187 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1188 r |= f;
1189 }
1190 }
1191 return r;
1192 }
1193
/*
 * Execute the CPUID instruction on the host with EAX=@function and
 * ECX=@count, storing the resulting register values through any of
 * @eax/@ebx/@ecx/@edx that are non-NULL (NULL pointers are skipped).
 * Aborts when built for a non-x86 host, where CPUID does not exist.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /*
     * On 32-bit, save and restore all GPRs with pusha/popa and store the
     * CPUID results through %esi ("S") instead of using output operands —
     * NOTE(review): presumably so %ebx, which may serve as the PIC/GOT
     * base register, never appears in the constraint list; confirm.
     */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
1227
/*
 * Query the host CPU's vendor string and family/model/stepping.
 *
 * @vendor receives the 12-character vendor string from CPUID leaf 0
 * (EBX/EDX/ECX order) plus a NUL terminator; family/model/stepping are
 * decoded from CPUID leaf 1 EAX and stored through any non-NULL pointer.
 */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family) {
        /* base family (bits 11:8) plus extended family (bits 27:20) */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model) {
        /* base model (bits 7:4), with extended model (bits 19:16)
         * shifted into the high nibble */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping) {
        /* stepping is bits 3:0 */
        *stepping = eax & 0x0F;
    }
}
1246
/* CPU class name definitions: */

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    /*
     * X86_CPU_TYPE_NAME("%s") expands to a printf format string that
     * embeds "%s" in the QOM type-name pattern, so the model name is
     * substituted into the right place by g_strdup_printf().
     */
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
1256
1257 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1258 {
1259 ObjectClass *oc;
1260 char *typename = x86_cpu_type_name(cpu_model);
1261 oc = object_class_by_name(typename);
1262 g_free(typename);
1263 return oc;
1264 }
1265
1266 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1267 {
1268 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1269 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1270 return g_strndup(class_name,
1271 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1272 }
1273
/* Definition of one built-in CPU model (see builtin_x86_defs) */
struct X86CPUDefinition {
    /* CPU model name, e.g. "qemu64" */
    const char *name;
    /* maximum basic CPUID leaf (CPUID[0].EAX) */
    uint32_t level;
    /* maximum extended CPUID leaf (CPUID[0x80000000].EAX) */
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    /* feature bits for each FeatureWord */
    FeatureWordArray features;
    /* model name string reported via CPUID[0x80000002..4] */
    const char *model_id;
    /* cache properties to advertise; NULL to use defaults */
    CPUCaches *cache_info;
};
1287
/*
 * Cache properties advertised by the AMD EPYC CPU models.
 * NOTE(review): the sizes/associativities presumably mirror real EPYC
 * parts (32K/8-way L1D, 64K/4-way L1I, 512K/8-way L2, 8M/16-way L3) —
 * confirm against AMD documentation before changing.
 */
static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DCACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = ICACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 8192,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};
1337
1338 static X86CPUDefinition builtin_x86_defs[] = {
1339 {
1340 .name = "qemu64",
1341 .level = 0xd,
1342 .vendor = CPUID_VENDOR_AMD,
1343 .family = 6,
1344 .model = 6,
1345 .stepping = 3,
1346 .features[FEAT_1_EDX] =
1347 PPRO_FEATURES |
1348 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1349 CPUID_PSE36,
1350 .features[FEAT_1_ECX] =
1351 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1352 .features[FEAT_8000_0001_EDX] =
1353 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1354 .features[FEAT_8000_0001_ECX] =
1355 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1356 .xlevel = 0x8000000A,
1357 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1358 },
1359 {
1360 .name = "phenom",
1361 .level = 5,
1362 .vendor = CPUID_VENDOR_AMD,
1363 .family = 16,
1364 .model = 2,
1365 .stepping = 3,
1366 /* Missing: CPUID_HT */
1367 .features[FEAT_1_EDX] =
1368 PPRO_FEATURES |
1369 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1370 CPUID_PSE36 | CPUID_VME,
1371 .features[FEAT_1_ECX] =
1372 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1373 CPUID_EXT_POPCNT,
1374 .features[FEAT_8000_0001_EDX] =
1375 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1376 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1377 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1378 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1379 CPUID_EXT3_CR8LEG,
1380 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1381 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1382 .features[FEAT_8000_0001_ECX] =
1383 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1384 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1385 /* Missing: CPUID_SVM_LBRV */
1386 .features[FEAT_SVM] =
1387 CPUID_SVM_NPT,
1388 .xlevel = 0x8000001A,
1389 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1390 },
1391 {
1392 .name = "core2duo",
1393 .level = 10,
1394 .vendor = CPUID_VENDOR_INTEL,
1395 .family = 6,
1396 .model = 15,
1397 .stepping = 11,
1398 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1399 .features[FEAT_1_EDX] =
1400 PPRO_FEATURES |
1401 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1402 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1403 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1404 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1405 .features[FEAT_1_ECX] =
1406 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1407 CPUID_EXT_CX16,
1408 .features[FEAT_8000_0001_EDX] =
1409 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1410 .features[FEAT_8000_0001_ECX] =
1411 CPUID_EXT3_LAHF_LM,
1412 .xlevel = 0x80000008,
1413 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1414 },
1415 {
1416 .name = "kvm64",
1417 .level = 0xd,
1418 .vendor = CPUID_VENDOR_INTEL,
1419 .family = 15,
1420 .model = 6,
1421 .stepping = 1,
1422 /* Missing: CPUID_HT */
1423 .features[FEAT_1_EDX] =
1424 PPRO_FEATURES | CPUID_VME |
1425 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1426 CPUID_PSE36,
1427 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1428 .features[FEAT_1_ECX] =
1429 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1430 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1431 .features[FEAT_8000_0001_EDX] =
1432 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1433 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1434 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1435 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1436 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1437 .features[FEAT_8000_0001_ECX] =
1438 0,
1439 .xlevel = 0x80000008,
1440 .model_id = "Common KVM processor"
1441 },
1442 {
1443 .name = "qemu32",
1444 .level = 4,
1445 .vendor = CPUID_VENDOR_INTEL,
1446 .family = 6,
1447 .model = 6,
1448 .stepping = 3,
1449 .features[FEAT_1_EDX] =
1450 PPRO_FEATURES,
1451 .features[FEAT_1_ECX] =
1452 CPUID_EXT_SSE3,
1453 .xlevel = 0x80000004,
1454 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1455 },
1456 {
1457 .name = "kvm32",
1458 .level = 5,
1459 .vendor = CPUID_VENDOR_INTEL,
1460 .family = 15,
1461 .model = 6,
1462 .stepping = 1,
1463 .features[FEAT_1_EDX] =
1464 PPRO_FEATURES | CPUID_VME |
1465 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1466 .features[FEAT_1_ECX] =
1467 CPUID_EXT_SSE3,
1468 .features[FEAT_8000_0001_ECX] =
1469 0,
1470 .xlevel = 0x80000008,
1471 .model_id = "Common 32-bit KVM processor"
1472 },
1473 {
1474 .name = "coreduo",
1475 .level = 10,
1476 .vendor = CPUID_VENDOR_INTEL,
1477 .family = 6,
1478 .model = 14,
1479 .stepping = 8,
1480 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1481 .features[FEAT_1_EDX] =
1482 PPRO_FEATURES | CPUID_VME |
1483 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1484 CPUID_SS,
1485 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1486 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1487 .features[FEAT_1_ECX] =
1488 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1489 .features[FEAT_8000_0001_EDX] =
1490 CPUID_EXT2_NX,
1491 .xlevel = 0x80000008,
1492 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1493 },
1494 {
1495 .name = "486",
1496 .level = 1,
1497 .vendor = CPUID_VENDOR_INTEL,
1498 .family = 4,
1499 .model = 8,
1500 .stepping = 0,
1501 .features[FEAT_1_EDX] =
1502 I486_FEATURES,
1503 .xlevel = 0,
1504 .model_id = "",
1505 },
1506 {
1507 .name = "pentium",
1508 .level = 1,
1509 .vendor = CPUID_VENDOR_INTEL,
1510 .family = 5,
1511 .model = 4,
1512 .stepping = 3,
1513 .features[FEAT_1_EDX] =
1514 PENTIUM_FEATURES,
1515 .xlevel = 0,
1516 .model_id = "",
1517 },
1518 {
1519 .name = "pentium2",
1520 .level = 2,
1521 .vendor = CPUID_VENDOR_INTEL,
1522 .family = 6,
1523 .model = 5,
1524 .stepping = 2,
1525 .features[FEAT_1_EDX] =
1526 PENTIUM2_FEATURES,
1527 .xlevel = 0,
1528 .model_id = "",
1529 },
1530 {
1531 .name = "pentium3",
1532 .level = 3,
1533 .vendor = CPUID_VENDOR_INTEL,
1534 .family = 6,
1535 .model = 7,
1536 .stepping = 3,
1537 .features[FEAT_1_EDX] =
1538 PENTIUM3_FEATURES,
1539 .xlevel = 0,
1540 .model_id = "",
1541 },
1542 {
1543 .name = "athlon",
1544 .level = 2,
1545 .vendor = CPUID_VENDOR_AMD,
1546 .family = 6,
1547 .model = 2,
1548 .stepping = 3,
1549 .features[FEAT_1_EDX] =
1550 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1551 CPUID_MCA,
1552 .features[FEAT_8000_0001_EDX] =
1553 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1554 .xlevel = 0x80000008,
1555 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1556 },
1557 {
1558 .name = "n270",
1559 .level = 10,
1560 .vendor = CPUID_VENDOR_INTEL,
1561 .family = 6,
1562 .model = 28,
1563 .stepping = 2,
1564 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1565 .features[FEAT_1_EDX] =
1566 PPRO_FEATURES |
1567 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1568 CPUID_ACPI | CPUID_SS,
1569 /* Some CPUs got no CPUID_SEP */
1570 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1571 * CPUID_EXT_XTPR */
1572 .features[FEAT_1_ECX] =
1573 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1574 CPUID_EXT_MOVBE,
1575 .features[FEAT_8000_0001_EDX] =
1576 CPUID_EXT2_NX,
1577 .features[FEAT_8000_0001_ECX] =
1578 CPUID_EXT3_LAHF_LM,
1579 .xlevel = 0x80000008,
1580 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1581 },
1582 {
1583 .name = "Conroe",
1584 .level = 10,
1585 .vendor = CPUID_VENDOR_INTEL,
1586 .family = 6,
1587 .model = 15,
1588 .stepping = 3,
1589 .features[FEAT_1_EDX] =
1590 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1591 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1592 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1593 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1594 CPUID_DE | CPUID_FP87,
1595 .features[FEAT_1_ECX] =
1596 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1597 .features[FEAT_8000_0001_EDX] =
1598 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1599 .features[FEAT_8000_0001_ECX] =
1600 CPUID_EXT3_LAHF_LM,
1601 .xlevel = 0x80000008,
1602 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1603 },
1604 {
1605 .name = "Penryn",
1606 .level = 10,
1607 .vendor = CPUID_VENDOR_INTEL,
1608 .family = 6,
1609 .model = 23,
1610 .stepping = 3,
1611 .features[FEAT_1_EDX] =
1612 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1613 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1614 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1615 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1616 CPUID_DE | CPUID_FP87,
1617 .features[FEAT_1_ECX] =
1618 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1619 CPUID_EXT_SSE3,
1620 .features[FEAT_8000_0001_EDX] =
1621 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1622 .features[FEAT_8000_0001_ECX] =
1623 CPUID_EXT3_LAHF_LM,
1624 .xlevel = 0x80000008,
1625 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1626 },
1627 {
1628 .name = "Nehalem",
1629 .level = 11,
1630 .vendor = CPUID_VENDOR_INTEL,
1631 .family = 6,
1632 .model = 26,
1633 .stepping = 3,
1634 .features[FEAT_1_EDX] =
1635 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1636 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1637 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1638 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1639 CPUID_DE | CPUID_FP87,
1640 .features[FEAT_1_ECX] =
1641 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1642 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1643 .features[FEAT_8000_0001_EDX] =
1644 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1645 .features[FEAT_8000_0001_ECX] =
1646 CPUID_EXT3_LAHF_LM,
1647 .xlevel = 0x80000008,
1648 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1649 },
1650 {
1651 .name = "Nehalem-IBRS",
1652 .level = 11,
1653 .vendor = CPUID_VENDOR_INTEL,
1654 .family = 6,
1655 .model = 26,
1656 .stepping = 3,
1657 .features[FEAT_1_EDX] =
1658 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1659 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1660 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1661 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1662 CPUID_DE | CPUID_FP87,
1663 .features[FEAT_1_ECX] =
1664 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1665 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1666 .features[FEAT_7_0_EDX] =
1667 CPUID_7_0_EDX_SPEC_CTRL,
1668 .features[FEAT_8000_0001_EDX] =
1669 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1670 .features[FEAT_8000_0001_ECX] =
1671 CPUID_EXT3_LAHF_LM,
1672 .xlevel = 0x80000008,
1673 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1674 },
1675 {
1676 .name = "Westmere",
1677 .level = 11,
1678 .vendor = CPUID_VENDOR_INTEL,
1679 .family = 6,
1680 .model = 44,
1681 .stepping = 1,
1682 .features[FEAT_1_EDX] =
1683 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1684 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1685 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1686 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1687 CPUID_DE | CPUID_FP87,
1688 .features[FEAT_1_ECX] =
1689 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1690 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1691 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1692 .features[FEAT_8000_0001_EDX] =
1693 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1694 .features[FEAT_8000_0001_ECX] =
1695 CPUID_EXT3_LAHF_LM,
1696 .features[FEAT_6_EAX] =
1697 CPUID_6_EAX_ARAT,
1698 .xlevel = 0x80000008,
1699 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1700 },
1701 {
1702 .name = "Westmere-IBRS",
1703 .level = 11,
1704 .vendor = CPUID_VENDOR_INTEL,
1705 .family = 6,
1706 .model = 44,
1707 .stepping = 1,
1708 .features[FEAT_1_EDX] =
1709 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1710 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1711 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1712 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1713 CPUID_DE | CPUID_FP87,
1714 .features[FEAT_1_ECX] =
1715 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1716 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1717 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1718 .features[FEAT_8000_0001_EDX] =
1719 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1720 .features[FEAT_8000_0001_ECX] =
1721 CPUID_EXT3_LAHF_LM,
1722 .features[FEAT_7_0_EDX] =
1723 CPUID_7_0_EDX_SPEC_CTRL,
1724 .features[FEAT_6_EAX] =
1725 CPUID_6_EAX_ARAT,
1726 .xlevel = 0x80000008,
1727 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1728 },
1729 {
1730 .name = "SandyBridge",
1731 .level = 0xd,
1732 .vendor = CPUID_VENDOR_INTEL,
1733 .family = 6,
1734 .model = 42,
1735 .stepping = 1,
1736 .features[FEAT_1_EDX] =
1737 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1738 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1739 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1740 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1741 CPUID_DE | CPUID_FP87,
1742 .features[FEAT_1_ECX] =
1743 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1744 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1745 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1746 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1747 CPUID_EXT_SSE3,
1748 .features[FEAT_8000_0001_EDX] =
1749 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1750 CPUID_EXT2_SYSCALL,
1751 .features[FEAT_8000_0001_ECX] =
1752 CPUID_EXT3_LAHF_LM,
1753 .features[FEAT_XSAVE] =
1754 CPUID_XSAVE_XSAVEOPT,
1755 .features[FEAT_6_EAX] =
1756 CPUID_6_EAX_ARAT,
1757 .xlevel = 0x80000008,
1758 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1759 },
1760 {
1761 .name = "SandyBridge-IBRS",
1762 .level = 0xd,
1763 .vendor = CPUID_VENDOR_INTEL,
1764 .family = 6,
1765 .model = 42,
1766 .stepping = 1,
1767 .features[FEAT_1_EDX] =
1768 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1769 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1770 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1771 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1772 CPUID_DE | CPUID_FP87,
1773 .features[FEAT_1_ECX] =
1774 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1775 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1776 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1777 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1778 CPUID_EXT_SSE3,
1779 .features[FEAT_8000_0001_EDX] =
1780 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1781 CPUID_EXT2_SYSCALL,
1782 .features[FEAT_8000_0001_ECX] =
1783 CPUID_EXT3_LAHF_LM,
1784 .features[FEAT_7_0_EDX] =
1785 CPUID_7_0_EDX_SPEC_CTRL,
1786 .features[FEAT_XSAVE] =
1787 CPUID_XSAVE_XSAVEOPT,
1788 .features[FEAT_6_EAX] =
1789 CPUID_6_EAX_ARAT,
1790 .xlevel = 0x80000008,
1791 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1792 },
1793 {
1794 .name = "IvyBridge",
1795 .level = 0xd,
1796 .vendor = CPUID_VENDOR_INTEL,
1797 .family = 6,
1798 .model = 58,
1799 .stepping = 9,
1800 .features[FEAT_1_EDX] =
1801 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1802 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1803 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1804 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1805 CPUID_DE | CPUID_FP87,
1806 .features[FEAT_1_ECX] =
1807 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1808 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1809 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1810 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1811 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1812 .features[FEAT_7_0_EBX] =
1813 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1814 CPUID_7_0_EBX_ERMS,
1815 .features[FEAT_8000_0001_EDX] =
1816 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1817 CPUID_EXT2_SYSCALL,
1818 .features[FEAT_8000_0001_ECX] =
1819 CPUID_EXT3_LAHF_LM,
1820 .features[FEAT_XSAVE] =
1821 CPUID_XSAVE_XSAVEOPT,
1822 .features[FEAT_6_EAX] =
1823 CPUID_6_EAX_ARAT,
1824 .xlevel = 0x80000008,
1825 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1826 },
1827 {
1828 .name = "IvyBridge-IBRS",
1829 .level = 0xd,
1830 .vendor = CPUID_VENDOR_INTEL,
1831 .family = 6,
1832 .model = 58,
1833 .stepping = 9,
1834 .features[FEAT_1_EDX] =
1835 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1836 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1837 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1838 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1839 CPUID_DE | CPUID_FP87,
1840 .features[FEAT_1_ECX] =
1841 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1842 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1843 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1844 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1845 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1846 .features[FEAT_7_0_EBX] =
1847 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1848 CPUID_7_0_EBX_ERMS,
1849 .features[FEAT_8000_0001_EDX] =
1850 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1851 CPUID_EXT2_SYSCALL,
1852 .features[FEAT_8000_0001_ECX] =
1853 CPUID_EXT3_LAHF_LM,
1854 .features[FEAT_7_0_EDX] =
1855 CPUID_7_0_EDX_SPEC_CTRL,
1856 .features[FEAT_XSAVE] =
1857 CPUID_XSAVE_XSAVEOPT,
1858 .features[FEAT_6_EAX] =
1859 CPUID_6_EAX_ARAT,
1860 .xlevel = 0x80000008,
1861 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1862 },
1863 {
1864 .name = "Haswell-noTSX",
1865 .level = 0xd,
1866 .vendor = CPUID_VENDOR_INTEL,
1867 .family = 6,
1868 .model = 60,
1869 .stepping = 1,
1870 .features[FEAT_1_EDX] =
1871 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1872 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1873 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1874 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1875 CPUID_DE | CPUID_FP87,
1876 .features[FEAT_1_ECX] =
1877 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1878 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1879 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1880 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1881 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1882 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1883 .features[FEAT_8000_0001_EDX] =
1884 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1885 CPUID_EXT2_SYSCALL,
1886 .features[FEAT_8000_0001_ECX] =
1887 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1888 .features[FEAT_7_0_EBX] =
1889 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1890 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1891 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1892 .features[FEAT_XSAVE] =
1893 CPUID_XSAVE_XSAVEOPT,
1894 .features[FEAT_6_EAX] =
1895 CPUID_6_EAX_ARAT,
1896 .xlevel = 0x80000008,
1897 .model_id = "Intel Core Processor (Haswell, no TSX)",
1898 },
1899 {
1900 .name = "Haswell-noTSX-IBRS",
1901 .level = 0xd,
1902 .vendor = CPUID_VENDOR_INTEL,
1903 .family = 6,
1904 .model = 60,
1905 .stepping = 1,
1906 .features[FEAT_1_EDX] =
1907 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1908 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1909 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1910 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1911 CPUID_DE | CPUID_FP87,
1912 .features[FEAT_1_ECX] =
1913 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1914 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1915 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1916 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1917 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1918 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1919 .features[FEAT_8000_0001_EDX] =
1920 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1921 CPUID_EXT2_SYSCALL,
1922 .features[FEAT_8000_0001_ECX] =
1923 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1924 .features[FEAT_7_0_EDX] =
1925 CPUID_7_0_EDX_SPEC_CTRL,
1926 .features[FEAT_7_0_EBX] =
1927 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1928 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1929 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1930 .features[FEAT_XSAVE] =
1931 CPUID_XSAVE_XSAVEOPT,
1932 .features[FEAT_6_EAX] =
1933 CPUID_6_EAX_ARAT,
1934 .xlevel = 0x80000008,
1935 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1936 },
1937 {
1938 .name = "Haswell",
1939 .level = 0xd,
1940 .vendor = CPUID_VENDOR_INTEL,
1941 .family = 6,
1942 .model = 60,
1943 .stepping = 4,
1944 .features[FEAT_1_EDX] =
1945 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1946 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1947 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1948 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1949 CPUID_DE | CPUID_FP87,
1950 .features[FEAT_1_ECX] =
1951 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1952 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1953 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1954 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1955 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1956 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1957 .features[FEAT_8000_0001_EDX] =
1958 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1959 CPUID_EXT2_SYSCALL,
1960 .features[FEAT_8000_0001_ECX] =
1961 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1962 .features[FEAT_7_0_EBX] =
1963 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1964 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1965 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1966 CPUID_7_0_EBX_RTM,
1967 .features[FEAT_XSAVE] =
1968 CPUID_XSAVE_XSAVEOPT,
1969 .features[FEAT_6_EAX] =
1970 CPUID_6_EAX_ARAT,
1971 .xlevel = 0x80000008,
1972 .model_id = "Intel Core Processor (Haswell)",
1973 },
1974 {
1975 .name = "Haswell-IBRS",
1976 .level = 0xd,
1977 .vendor = CPUID_VENDOR_INTEL,
1978 .family = 6,
1979 .model = 60,
1980 .stepping = 4,
1981 .features[FEAT_1_EDX] =
1982 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1983 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1984 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1985 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1986 CPUID_DE | CPUID_FP87,
1987 .features[FEAT_1_ECX] =
1988 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1989 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1990 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1991 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1992 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1993 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1994 .features[FEAT_8000_0001_EDX] =
1995 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1996 CPUID_EXT2_SYSCALL,
1997 .features[FEAT_8000_0001_ECX] =
1998 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1999 .features[FEAT_7_0_EDX] =
2000 CPUID_7_0_EDX_SPEC_CTRL,
2001 .features[FEAT_7_0_EBX] =
2002 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2003 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2004 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2005 CPUID_7_0_EBX_RTM,
2006 .features[FEAT_XSAVE] =
2007 CPUID_XSAVE_XSAVEOPT,
2008 .features[FEAT_6_EAX] =
2009 CPUID_6_EAX_ARAT,
2010 .xlevel = 0x80000008,
2011 .model_id = "Intel Core Processor (Haswell, IBRS)",
2012 },
2013 {
2014 .name = "Broadwell-noTSX",
2015 .level = 0xd,
2016 .vendor = CPUID_VENDOR_INTEL,
2017 .family = 6,
2018 .model = 61,
2019 .stepping = 2,
2020 .features[FEAT_1_EDX] =
2021 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2022 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2023 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2024 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2025 CPUID_DE | CPUID_FP87,
2026 .features[FEAT_1_ECX] =
2027 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2028 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2029 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2030 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2031 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2032 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2033 .features[FEAT_8000_0001_EDX] =
2034 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2035 CPUID_EXT2_SYSCALL,
2036 .features[FEAT_8000_0001_ECX] =
2037 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2038 .features[FEAT_7_0_EBX] =
2039 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2040 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2041 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2042 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2043 CPUID_7_0_EBX_SMAP,
2044 .features[FEAT_XSAVE] =
2045 CPUID_XSAVE_XSAVEOPT,
2046 .features[FEAT_6_EAX] =
2047 CPUID_6_EAX_ARAT,
2048 .xlevel = 0x80000008,
2049 .model_id = "Intel Core Processor (Broadwell, no TSX)",
2050 },
2051 {
2052 .name = "Broadwell-noTSX-IBRS",
2053 .level = 0xd,
2054 .vendor = CPUID_VENDOR_INTEL,
2055 .family = 6,
2056 .model = 61,
2057 .stepping = 2,
2058 .features[FEAT_1_EDX] =
2059 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2060 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2061 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2062 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2063 CPUID_DE | CPUID_FP87,
2064 .features[FEAT_1_ECX] =
2065 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2066 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2067 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2068 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2069 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2070 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2071 .features[FEAT_8000_0001_EDX] =
2072 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2073 CPUID_EXT2_SYSCALL,
2074 .features[FEAT_8000_0001_ECX] =
2075 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2076 .features[FEAT_7_0_EDX] =
2077 CPUID_7_0_EDX_SPEC_CTRL,
2078 .features[FEAT_7_0_EBX] =
2079 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2080 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2081 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2082 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2083 CPUID_7_0_EBX_SMAP,
2084 .features[FEAT_XSAVE] =
2085 CPUID_XSAVE_XSAVEOPT,
2086 .features[FEAT_6_EAX] =
2087 CPUID_6_EAX_ARAT,
2088 .xlevel = 0x80000008,
2089 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
2090 },
2091 {
2092 .name = "Broadwell",
2093 .level = 0xd,
2094 .vendor = CPUID_VENDOR_INTEL,
2095 .family = 6,
2096 .model = 61,
2097 .stepping = 2,
2098 .features[FEAT_1_EDX] =
2099 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2100 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2101 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2102 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2103 CPUID_DE | CPUID_FP87,
2104 .features[FEAT_1_ECX] =
2105 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2106 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2107 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2108 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2109 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2110 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2111 .features[FEAT_8000_0001_EDX] =
2112 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2113 CPUID_EXT2_SYSCALL,
2114 .features[FEAT_8000_0001_ECX] =
2115 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2116 .features[FEAT_7_0_EBX] =
2117 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2118 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2119 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2120 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2121 CPUID_7_0_EBX_SMAP,
2122 .features[FEAT_XSAVE] =
2123 CPUID_XSAVE_XSAVEOPT,
2124 .features[FEAT_6_EAX] =
2125 CPUID_6_EAX_ARAT,
2126 .xlevel = 0x80000008,
2127 .model_id = "Intel Core Processor (Broadwell)",
2128 },
2129 {
2130 .name = "Broadwell-IBRS",
2131 .level = 0xd,
2132 .vendor = CPUID_VENDOR_INTEL,
2133 .family = 6,
2134 .model = 61,
2135 .stepping = 2,
2136 .features[FEAT_1_EDX] =
2137 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2138 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2139 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2140 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2141 CPUID_DE | CPUID_FP87,
2142 .features[FEAT_1_ECX] =
2143 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2144 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2145 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2146 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2147 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2148 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2149 .features[FEAT_8000_0001_EDX] =
2150 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2151 CPUID_EXT2_SYSCALL,
2152 .features[FEAT_8000_0001_ECX] =
2153 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2154 .features[FEAT_7_0_EDX] =
2155 CPUID_7_0_EDX_SPEC_CTRL,
2156 .features[FEAT_7_0_EBX] =
2157 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2158 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2159 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2160 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2161 CPUID_7_0_EBX_SMAP,
2162 .features[FEAT_XSAVE] =
2163 CPUID_XSAVE_XSAVEOPT,
2164 .features[FEAT_6_EAX] =
2165 CPUID_6_EAX_ARAT,
2166 .xlevel = 0x80000008,
2167 .model_id = "Intel Core Processor (Broadwell, IBRS)",
2168 },
2169 {
2170 .name = "Skylake-Client",
2171 .level = 0xd,
2172 .vendor = CPUID_VENDOR_INTEL,
2173 .family = 6,
2174 .model = 94,
2175 .stepping = 3,
2176 .features[FEAT_1_EDX] =
2177 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2178 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2179 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2180 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2181 CPUID_DE | CPUID_FP87,
2182 .features[FEAT_1_ECX] =
2183 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2184 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2185 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2186 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2187 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2188 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2189 .features[FEAT_8000_0001_EDX] =
2190 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2191 CPUID_EXT2_SYSCALL,
2192 .features[FEAT_8000_0001_ECX] =
2193 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2194 .features[FEAT_7_0_EBX] =
2195 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2196 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2197 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2198 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2199 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2200 /* Missing: XSAVES (not supported by some Linux versions,
2201 * including v4.1 to v4.12).
2202 * KVM doesn't yet expose any XSAVES state save component,
2203 * and the only one defined in Skylake (processor tracing)
2204 * probably will block migration anyway.
2205 */
2206 .features[FEAT_XSAVE] =
2207 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2208 CPUID_XSAVE_XGETBV1,
2209 .features[FEAT_6_EAX] =
2210 CPUID_6_EAX_ARAT,
2211 .xlevel = 0x80000008,
2212 .model_id = "Intel Core Processor (Skylake)",
2213 },
2214 {
2215 .name = "Skylake-Client-IBRS",
2216 .level = 0xd,
2217 .vendor = CPUID_VENDOR_INTEL,
2218 .family = 6,
2219 .model = 94,
2220 .stepping = 3,
2221 .features[FEAT_1_EDX] =
2222 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2223 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2224 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2225 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2226 CPUID_DE | CPUID_FP87,
2227 .features[FEAT_1_ECX] =
2228 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2229 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2230 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2231 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2232 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2233 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2234 .features[FEAT_8000_0001_EDX] =
2235 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2236 CPUID_EXT2_SYSCALL,
2237 .features[FEAT_8000_0001_ECX] =
2238 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2239 .features[FEAT_7_0_EDX] =
2240 CPUID_7_0_EDX_SPEC_CTRL,
2241 .features[FEAT_7_0_EBX] =
2242 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2243 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2244 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2245 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2246 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2247 /* Missing: XSAVES (not supported by some Linux versions,
2248 * including v4.1 to v4.12).
2249 * KVM doesn't yet expose any XSAVES state save component,
2250 * and the only one defined in Skylake (processor tracing)
2251 * probably will block migration anyway.
2252 */
2253 .features[FEAT_XSAVE] =
2254 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2255 CPUID_XSAVE_XGETBV1,
2256 .features[FEAT_6_EAX] =
2257 CPUID_6_EAX_ARAT,
2258 .xlevel = 0x80000008,
2259 .model_id = "Intel Core Processor (Skylake, IBRS)",
2260 },
2261 {
2262 .name = "Skylake-Server",
2263 .level = 0xd,
2264 .vendor = CPUID_VENDOR_INTEL,
2265 .family = 6,
2266 .model = 85,
2267 .stepping = 4,
2268 .features[FEAT_1_EDX] =
2269 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2270 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2271 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2272 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2273 CPUID_DE | CPUID_FP87,
2274 .features[FEAT_1_ECX] =
2275 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2276 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2277 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2278 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2279 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2280 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2281 .features[FEAT_8000_0001_EDX] =
2282 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2283 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2284 .features[FEAT_8000_0001_ECX] =
2285 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2286 .features[FEAT_7_0_EBX] =
2287 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2288 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2289 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2290 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2291 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2292 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2293 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2294 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2295 /* Missing: XSAVES (not supported by some Linux versions,
2296 * including v4.1 to v4.12).
2297 * KVM doesn't yet expose any XSAVES state save component,
2298 * and the only one defined in Skylake (processor tracing)
2299 * probably will block migration anyway.
2300 */
2301 .features[FEAT_XSAVE] =
2302 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2303 CPUID_XSAVE_XGETBV1,
2304 .features[FEAT_6_EAX] =
2305 CPUID_6_EAX_ARAT,
2306 .xlevel = 0x80000008,
2307 .model_id = "Intel Xeon Processor (Skylake)",
2308 },
2309 {
2310 .name = "Skylake-Server-IBRS",
2311 .level = 0xd,
2312 .vendor = CPUID_VENDOR_INTEL,
2313 .family = 6,
2314 .model = 85,
2315 .stepping = 4,
2316 .features[FEAT_1_EDX] =
2317 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2318 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2319 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2320 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2321 CPUID_DE | CPUID_FP87,
2322 .features[FEAT_1_ECX] =
2323 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2324 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2325 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2326 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2327 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2328 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2329 .features[FEAT_8000_0001_EDX] =
2330 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2331 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2332 .features[FEAT_8000_0001_ECX] =
2333 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2334 .features[FEAT_7_0_EDX] =
2335 CPUID_7_0_EDX_SPEC_CTRL,
2336 .features[FEAT_7_0_EBX] =
2337 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2338 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2339 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2340 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2341 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2342 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2343 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2344 CPUID_7_0_EBX_AVX512VL,
2345 /* Missing: XSAVES (not supported by some Linux versions,
2346 * including v4.1 to v4.12).
2347 * KVM doesn't yet expose any XSAVES state save component,
2348 * and the only one defined in Skylake (processor tracing)
2349 * probably will block migration anyway.
2350 */
2351 .features[FEAT_XSAVE] =
2352 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2353 CPUID_XSAVE_XGETBV1,
2354 .features[FEAT_6_EAX] =
2355 CPUID_6_EAX_ARAT,
2356 .xlevel = 0x80000008,
2357 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2358 },
2359 {
2360 .name = "KnightsMill",
2361 .level = 0xd,
2362 .vendor = CPUID_VENDOR_INTEL,
2363 .family = 6,
2364 .model = 133,
2365 .stepping = 0,
2366 .features[FEAT_1_EDX] =
2367 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2368 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2369 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2370 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2371 CPUID_PSE | CPUID_DE | CPUID_FP87,
2372 .features[FEAT_1_ECX] =
2373 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2374 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2375 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2376 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2377 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2378 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2379 .features[FEAT_8000_0001_EDX] =
2380 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2381 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2382 .features[FEAT_8000_0001_ECX] =
2383 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2384 .features[FEAT_7_0_EBX] =
2385 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2386 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2387 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2388 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2389 CPUID_7_0_EBX_AVX512ER,
2390 .features[FEAT_7_0_ECX] =
2391 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2392 .features[FEAT_7_0_EDX] =
2393 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2394 .features[FEAT_XSAVE] =
2395 CPUID_XSAVE_XSAVEOPT,
2396 .features[FEAT_6_EAX] =
2397 CPUID_6_EAX_ARAT,
2398 .xlevel = 0x80000008,
2399 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2400 },
2401 {
2402 .name = "Opteron_G1",
2403 .level = 5,
2404 .vendor = CPUID_VENDOR_AMD,
2405 .family = 15,
2406 .model = 6,
2407 .stepping = 1,
2408 .features[FEAT_1_EDX] =
2409 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2410 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2411 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2412 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2413 CPUID_DE | CPUID_FP87,
2414 .features[FEAT_1_ECX] =
2415 CPUID_EXT_SSE3,
2416 .features[FEAT_8000_0001_EDX] =
2417 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2418 .xlevel = 0x80000008,
2419 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2420 },
2421 {
2422 .name = "Opteron_G2",
2423 .level = 5,
2424 .vendor = CPUID_VENDOR_AMD,
2425 .family = 15,
2426 .model = 6,
2427 .stepping = 1,
2428 .features[FEAT_1_EDX] =
2429 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2430 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2431 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2432 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2433 CPUID_DE | CPUID_FP87,
2434 .features[FEAT_1_ECX] =
2435 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2436 /* Missing: CPUID_EXT2_RDTSCP */
2437 .features[FEAT_8000_0001_EDX] =
2438 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2439 .features[FEAT_8000_0001_ECX] =
2440 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2441 .xlevel = 0x80000008,
2442 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2443 },
2444 {
2445 .name = "Opteron_G3",
2446 .level = 5,
2447 .vendor = CPUID_VENDOR_AMD,
2448 .family = 16,
2449 .model = 2,
2450 .stepping = 3,
2451 .features[FEAT_1_EDX] =
2452 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2453 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2454 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2455 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2456 CPUID_DE | CPUID_FP87,
2457 .features[FEAT_1_ECX] =
2458 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2459 CPUID_EXT_SSE3,
2460 /* Missing: CPUID_EXT2_RDTSCP */
2461 .features[FEAT_8000_0001_EDX] =
2462 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2463 .features[FEAT_8000_0001_ECX] =
2464 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2465 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2466 .xlevel = 0x80000008,
2467 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2468 },
2469 {
2470 .name = "Opteron_G4",
2471 .level = 0xd,
2472 .vendor = CPUID_VENDOR_AMD,
2473 .family = 21,
2474 .model = 1,
2475 .stepping = 2,
2476 .features[FEAT_1_EDX] =
2477 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2478 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2479 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2480 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2481 CPUID_DE | CPUID_FP87,
2482 .features[FEAT_1_ECX] =
2483 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2484 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2485 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2486 CPUID_EXT_SSE3,
2487 /* Missing: CPUID_EXT2_RDTSCP */
2488 .features[FEAT_8000_0001_EDX] =
2489 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2490 CPUID_EXT2_SYSCALL,
2491 .features[FEAT_8000_0001_ECX] =
2492 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2493 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2494 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2495 CPUID_EXT3_LAHF_LM,
2496 /* no xsaveopt! */
2497 .xlevel = 0x8000001A,
2498 .model_id = "AMD Opteron 62xx class CPU",
2499 },
2500 {
2501 .name = "Opteron_G5",
2502 .level = 0xd,
2503 .vendor = CPUID_VENDOR_AMD,
2504 .family = 21,
2505 .model = 2,
2506 .stepping = 0,
2507 .features[FEAT_1_EDX] =
2508 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2509 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2510 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2511 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2512 CPUID_DE | CPUID_FP87,
2513 .features[FEAT_1_ECX] =
2514 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2515 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2516 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2517 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2518 /* Missing: CPUID_EXT2_RDTSCP */
2519 .features[FEAT_8000_0001_EDX] =
2520 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2521 CPUID_EXT2_SYSCALL,
2522 .features[FEAT_8000_0001_ECX] =
2523 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2524 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2525 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2526 CPUID_EXT3_LAHF_LM,
2527 /* no xsaveopt! */
2528 .xlevel = 0x8000001A,
2529 .model_id = "AMD Opteron 63xx class CPU",
2530 },
2531 {
2532 .name = "EPYC",
2533 .level = 0xd,
2534 .vendor = CPUID_VENDOR_AMD,
2535 .family = 23,
2536 .model = 1,
2537 .stepping = 2,
2538 .features[FEAT_1_EDX] =
2539 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2540 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2541 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2542 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2543 CPUID_VME | CPUID_FP87,
2544 .features[FEAT_1_ECX] =
2545 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2546 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2547 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2548 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2549 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2550 .features[FEAT_8000_0001_EDX] =
2551 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2552 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2553 CPUID_EXT2_SYSCALL,
2554 .features[FEAT_8000_0001_ECX] =
2555 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2556 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2557 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2558 .features[FEAT_7_0_EBX] =
2559 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2560 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2561 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2562 CPUID_7_0_EBX_SHA_NI,
2563 /* Missing: XSAVES (not supported by some Linux versions,
2564 * including v4.1 to v4.12).
2565 * KVM doesn't yet expose any XSAVES state save component.
2566 */
2567 .features[FEAT_XSAVE] =
2568 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2569 CPUID_XSAVE_XGETBV1,
2570 .features[FEAT_6_EAX] =
2571 CPUID_6_EAX_ARAT,
2572 .xlevel = 0x8000000A,
2573 .model_id = "AMD EPYC Processor",
2574 .cache_info = &epyc_cache_info,
2575 },
2576 {
2577 .name = "EPYC-IBPB",
2578 .level = 0xd,
2579 .vendor = CPUID_VENDOR_AMD,
2580 .family = 23,
2581 .model = 1,
2582 .stepping = 2,
2583 .features[FEAT_1_EDX] =
2584 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2585 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2586 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2587 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2588 CPUID_VME | CPUID_FP87,
2589 .features[FEAT_1_ECX] =
2590 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2591 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2592 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2593 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2594 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2595 .features[FEAT_8000_0001_EDX] =
2596 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2597 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2598 CPUID_EXT2_SYSCALL,
2599 .features[FEAT_8000_0001_ECX] =
2600 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2601 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2602 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2603 .features[FEAT_8000_0008_EBX] =
2604 CPUID_8000_0008_EBX_IBPB,
2605 .features[FEAT_7_0_EBX] =
2606 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2607 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2608 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2609 CPUID_7_0_EBX_SHA_NI,
2610 /* Missing: XSAVES (not supported by some Linux versions,
2611 * including v4.1 to v4.12).
2612 * KVM doesn't yet expose any XSAVES state save component.
2613 */
2614 .features[FEAT_XSAVE] =
2615 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2616 CPUID_XSAVE_XGETBV1,
2617 .features[FEAT_6_EAX] =
2618 CPUID_6_EAX_ARAT,
2619 .xlevel = 0x8000000A,
2620 .model_id = "AMD EPYC Processor (with IBPB)",
2621 .cache_info = &epyc_cache_info,
2622 },
2623 };
2624
/* Name/value pair for a QOM property default applied to CPU objects */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
2628
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 * The list is NULL-terminated; x86_cpu_change_kvm_default() may
 * override individual values at startup.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    /* NOTE(review): acpi, monitor and svm default to off under KVM here —
     * presumably not (fully) supported by the accelerator; confirm.
     */
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },                 /* terminator */
};
2645
/* TCG-specific defaults that override all CPU models when using TCG.
 * NULL-terminated, same format as kvm_default_props.
 */
static PropValue tcg_default_props[] = {
    /* NOTE(review): vme is forced off — presumably the v86 mode extensions
     * are not implemented by TCG; confirm.
     */
    { "vme", "off" },
    { NULL, NULL },                 /* terminator */
};
2652
2653
2654 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2655 {
2656 PropValue *pv;
2657 for (pv = kvm_default_props; pv->prop; pv++) {
2658 if (!strcmp(pv->prop, prop)) {
2659 pv->value = value;
2660 break;
2661 }
2662 }
2663
2664 /* It is valid to call this function only for properties that
2665 * are already present in the kvm_default_props table.
2666 */
2667 assert(pv->prop);
2668 }
2669
2670 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2671 bool migratable_only);
2672
/*
 * Report whether the host supports Local Machine Check Exceptions.
 * Without KVM there is no host capability to query, so the answer
 * is always false.
 */
static bool lmce_supported(void)
{
#ifdef CONFIG_KVM
    uint64_t mce_cap = 0;

    /* Ask KVM for the host MCE capability mask; an ioctl failure
     * is treated the same as "LMCE not available".
     */
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) >= 0) {
        return (mce_cap & MCG_LMCE_P) != 0;
    }
#endif
    return false;
}
2685
#define CPUID_MODEL_ID_SZ 48

/**
 * cpu_x86_fill_model_id:
 * Get CPUID model ID string from host CPU.
 *
 * @str should have at least CPUID_MODEL_ID_SZ bytes
 *
 * The function does NOT add a null terminator to the string
 * automatically.
 *
 * Returns: always 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    /* Leaves 0x80000002..0x80000004 each yield 16 bytes of the brand
     * string, in EAX/EBX/ECX/EDX order; copy them back to back.
     */
    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
2711
/* QOM properties specific to the "max" CPU model */
static Property max_x86_cpu_properties[] = {
    /* default: expose only migration-safe features */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
2717
2718 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2719 {
2720 DeviceClass *dc = DEVICE_CLASS(oc);
2721 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2722
2723 xcc->ordering = 9;
2724
2725 xcc->model_description =
2726 "Enables all features supported by the accelerator in the current host";
2727
2728 dc->props = max_x86_cpu_properties;
2729 }
2730
2731 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2732
/* Instance init for the "max" CPU model: copy vendor/family/model/
 * stepping/model-id from the host when a host-CPUID-capable accelerator
 * (KVM or HVF) is in use, otherwise fall back to fixed TCG values.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        /* Buffers are one byte larger than the raw CPUID data so they
         * stay NUL-terminated (cpu_x86_fill_model_id() does not
         * terminate its output) */
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        /* CPUID leaf 0: vendor string is packed into EBX/EDX/ECX */
        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        host_vendor_fms(vendor, &family, &model, &stepping);

        cpu_x86_fill_model_id(model_id);

        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        if (kvm_enabled()) {
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            /* accel_uses_host_cpuid() && !kvm_enabled() — assumed to
             * mean HVF here; confirm if more accelerators gain host
             * CPUID support */
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG (or other non-host accelerator): fixed synthetic identity */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
2798
/* Type registration for the "max" CPU model */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
2805
2806 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2807 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2808 {
2809 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2810
2811 xcc->host_cpuid_required = true;
2812 xcc->ordering = 8;
2813
2814 if (kvm_enabled()) {
2815 xcc->model_description =
2816 "KVM processor with all supported host features ";
2817 } else if (hvf_enabled()) {
2818 xcc->model_description =
2819 "HVF processor with all supported host features ";
2820 }
2821 }
2822
/* The "host" CPU model: derived from "max", but additionally requires
 * host CPUID access (see host_x86_cpu_class_init above) */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
2828
2829 #endif
2830
2831 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2832 {
2833 FeatureWordInfo *f = &feature_word_info[w];
2834 int i;
2835
2836 for (i = 0; i < 32; ++i) {
2837 if ((1UL << i) & mask) {
2838 const char *reg = get_register_name_32(f->cpuid_reg);
2839 assert(reg);
2840 warn_report("%s doesn't support requested feature: "
2841 "CPUID.%02XH:%s%s%s [bit %d]",
2842 accel_uses_host_cpuid() ? "host" : "TCG",
2843 f->cpuid_eax, reg,
2844 f->feat_names[i] ? "." : "",
2845 f->feat_names[i] ? f->feat_names[i] : "", i);
2846 }
2847 }
2848 }
2849
2850 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2851 const char *name, void *opaque,
2852 Error **errp)
2853 {
2854 X86CPU *cpu = X86_CPU(obj);
2855 CPUX86State *env = &cpu->env;
2856 int64_t value;
2857
2858 value = (env->cpuid_version >> 8) & 0xf;
2859 if (value == 0xf) {
2860 value += (env->cpuid_version >> 20) & 0xff;
2861 }
2862 visit_type_int(v, name, &value, errp);
2863 }
2864
2865 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2866 const char *name, void *opaque,
2867 Error **errp)
2868 {
2869 X86CPU *cpu = X86_CPU(obj);
2870 CPUX86State *env = &cpu->env;
2871 const int64_t min = 0;
2872 const int64_t max = 0xff + 0xf;
2873 Error *local_err = NULL;
2874 int64_t value;
2875
2876 visit_type_int(v, name, &value, &local_err);
2877 if (local_err) {
2878 error_propagate(errp, local_err);
2879 return;
2880 }
2881 if (value < min || value > max) {
2882 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2883 name ? name : "null", value, min, max);
2884 return;
2885 }
2886
2887 env->cpuid_version &= ~0xff00f00;
2888 if (value > 0x0f) {
2889 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2890 } else {
2891 env->cpuid_version |= value << 8;
2892 }
2893 }
2894
2895 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2896 const char *name, void *opaque,
2897 Error **errp)
2898 {
2899 X86CPU *cpu = X86_CPU(obj);
2900 CPUX86State *env = &cpu->env;
2901 int64_t value;
2902
2903 value = (env->cpuid_version >> 4) & 0xf;
2904 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2905 visit_type_int(v, name, &value, errp);
2906 }
2907
2908 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2909 const char *name, void *opaque,
2910 Error **errp)
2911 {
2912 X86CPU *cpu = X86_CPU(obj);
2913 CPUX86State *env = &cpu->env;
2914 const int64_t min = 0;
2915 const int64_t max = 0xff;
2916 Error *local_err = NULL;
2917 int64_t value;
2918
2919 visit_type_int(v, name, &value, &local_err);
2920 if (local_err) {
2921 error_propagate(errp, local_err);
2922 return;
2923 }
2924 if (value < min || value > max) {
2925 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2926 name ? name : "null", value, min, max);
2927 return;
2928 }
2929
2930 env->cpuid_version &= ~0xf00f0;
2931 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
2932 }
2933
2934 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2935 const char *name, void *opaque,
2936 Error **errp)
2937 {
2938 X86CPU *cpu = X86_CPU(obj);
2939 CPUX86State *env = &cpu->env;
2940 int64_t value;
2941
2942 value = env->cpuid_version & 0xf;
2943 visit_type_int(v, name, &value, errp);
2944 }
2945
2946 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2947 const char *name, void *opaque,
2948 Error **errp)
2949 {
2950 X86CPU *cpu = X86_CPU(obj);
2951 CPUX86State *env = &cpu->env;
2952 const int64_t min = 0;
2953 const int64_t max = 0xf;
2954 Error *local_err = NULL;
2955 int64_t value;
2956
2957 visit_type_int(v, name, &value, &local_err);
2958 if (local_err) {
2959 error_propagate(errp, local_err);
2960 return;
2961 }
2962 if (value < min || value > max) {
2963 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2964 name ? name : "null", value, min, max);
2965 return;
2966 }
2967
2968 env->cpuid_version &= ~0xf;
2969 env->cpuid_version |= value & 0xf;
2970 }
2971
2972 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2973 {
2974 X86CPU *cpu = X86_CPU(obj);
2975 CPUX86State *env = &cpu->env;
2976 char *value;
2977
2978 value = g_malloc(CPUID_VENDOR_SZ + 1);
2979 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
2980 env->cpuid_vendor3);
2981 return value;
2982 }
2983
2984 static void x86_cpuid_set_vendor(Object *obj, const char *value,
2985 Error **errp)
2986 {
2987 X86CPU *cpu = X86_CPU(obj);
2988 CPUX86State *env = &cpu->env;
2989 int i;
2990
2991 if (strlen(value) != CPUID_VENDOR_SZ) {
2992 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
2993 return;
2994 }
2995
2996 env->cpuid_vendor1 = 0;
2997 env->cpuid_vendor2 = 0;
2998 env->cpuid_vendor3 = 0;
2999 for (i = 0; i < 4; i++) {
3000 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3001 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3002 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3003 }
3004 }
3005
3006 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3007 {
3008 X86CPU *cpu = X86_CPU(obj);
3009 CPUX86State *env = &cpu->env;
3010 char *value;
3011 int i;
3012
3013 value = g_malloc(48 + 1);
3014 for (i = 0; i < 48; i++) {
3015 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3016 }
3017 value[48] = '\0';
3018 return value;
3019 }
3020
3021 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3022 Error **errp)
3023 {
3024 X86CPU *cpu = X86_CPU(obj);
3025 CPUX86State *env = &cpu->env;
3026 int c, len, i;
3027
3028 if (model_id == NULL) {
3029 model_id = "";
3030 }
3031 len = strlen(model_id);
3032 memset(env->cpuid_model, 0, 48);
3033 for (i = 0; i < 48; i++) {
3034 if (i >= len) {
3035 c = '\0';
3036 } else {
3037 c = (uint8_t)model_id[i];
3038 }
3039 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3040 }
3041 }
3042
3043 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3044 void *opaque, Error **errp)
3045 {
3046 X86CPU *cpu = X86_CPU(obj);
3047 int64_t value;
3048
3049 value = cpu->env.tsc_khz * 1000;
3050 visit_type_int(v, name, &value, errp);
3051 }
3052
3053 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3054 void *opaque, Error **errp)
3055 {
3056 X86CPU *cpu = X86_CPU(obj);
3057 const int64_t min = 0;
3058 const int64_t max = INT64_MAX;
3059 Error *local_err = NULL;
3060 int64_t value;
3061
3062 visit_type_int(v, name, &value, &local_err);
3063 if (local_err) {
3064 error_propagate(errp, local_err);
3065 return;
3066 }
3067 if (value < min || value > max) {
3068 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3069 name ? name : "null", value, min, max);
3070 return;
3071 }
3072
3073 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
3074 }
3075
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* @opaque points at an array of FEATURE_WORDS uint32_t feature words
     * (the property registration chooses which array) */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    /* list nodes point into the stack arrays above; the visitor must
     * consume them before this function returns */
    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
3104
3105 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3106 void *opaque, Error **errp)
3107 {
3108 X86CPU *cpu = X86_CPU(obj);
3109 int64_t value = cpu->hyperv_spinlock_attempts;
3110
3111 visit_type_int(v, name, &value, errp);
3112 }
3113
3114 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3115 void *opaque, Error **errp)
3116 {
3117 const int64_t min = 0xFFF;
3118 const int64_t max = UINT_MAX;
3119 X86CPU *cpu = X86_CPU(obj);
3120 Error *err = NULL;
3121 int64_t value;
3122
3123 visit_type_int(v, name, &value, &err);
3124 if (err) {
3125 error_propagate(errp, err);
3126 return;
3127 }
3128
3129 if (value < min || value > max) {
3130 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
3131 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
3132 object_get_typename(obj), name ? name : "null",
3133 value, min, max);
3134 return;
3135 }
3136 cpu->hyperv_spinlock_attempts = value;
3137 }
3138
/* Property type backing the hyperv spinlock-retry count */
static const PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
3144
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    for (; *s; s++) {
        if (*s == '_') {
            *s = '-';
        }
    }
}
3154
/* Return the feature property name for a feature flag bit.
 * May return NULL when feat_names[] has no name for the bit.
 */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        /* component index: the HI word covers XSAVE components 32..63 */
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            /* redirect to the feature word/bit that enables this
             * XSAVE component */
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}
3175
/* Compatibility hack to maintain legacy +-feat semantics,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;
3182
3183 static gint compare_string(gconstpointer a, gconstpointer b)
3184 {
3185 return g_strcmp0(a, b);
3186 }
3187
/* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * Features are not applied directly: each one is registered as a global
 * property for @typename, taking effect when CPU objects are created.
 * Only the first call does any work; later calls return immediately
 * (cpu_globals_initialized latch).
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    /* NOTE: strtok() modifies @features in place and keeps static state;
     * safe here only because this function runs at most once */
    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: record "+feat"/"-feat" in the plus/minus
         * lists (applied elsewhere) and check below for ambiguous mixes
         * with "feat=..." */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "name=value"; a bare "name" means "name=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" takes metric suffixes and maps to the
         * "tsc-frequency" property */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}
3278
3279 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3280 static int x86_cpu_filter_features(X86CPU *cpu);
3281
/* Check for missing features that may prevent the CPU class from
 * running using the current machine and accelerator.
 *
 * Appends to @missing_feats the property names of all features that
 * would be filtered out.  The special entries "kvm" and "type" mean
 * the whole model is unusable with the current accelerator or could
 * not be expanded at all.
 */
static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
                                                 strList **missing_feats)
{
    X86CPU *xc;
    FeatureWord w;
    Error *err = NULL;
    strList **next = missing_feats;

    if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("kvm");
        *missing_feats = new;
        return;
    }

    /* Instantiate a throwaway CPU object to compute the filtered set */
    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));

    x86_cpu_expand_features(xc, &err);
    if (err) {
        /* Errors at x86_cpu_expand_features should never happen,
         * but in case it does, just report the model as not
         * runnable at all using the "type" property.
         */
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("type");
        *next = new;
        next = &new->next;
    }

    x86_cpu_filter_features(xc);

    /* Report each filtered-out feature bit by its property name */
    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t filtered = xc->filtered_features[w];
        int i;
        for (i = 0; i < 32; i++) {
            if (filtered & (1UL << i)) {
                strList *new = g_new0(strList, 1);
                new->value = g_strdup(x86_cpu_feature_name(w, i));
                *next = new;
                next = &new->next;
            }
        }
    }

    object_unref(OBJECT(xc));
}
3331
3332 /* Print all cpuid feature names in featureset
3333 */
3334 static void listflags(FILE *f, fprintf_function print, GList *features)
3335 {
3336 size_t len = 0;
3337 GList *tmp;
3338
3339 for (tmp = features; tmp; tmp = tmp->next) {
3340 const char *name = tmp->data;
3341 if ((len + strlen(name) + 1) >= 75) {
3342 print(f, "\n");
3343 len = 0;
3344 }
3345 print(f, "%s%s", len == 0 ? " " : " ", name);
3346 len += strlen(name) + 1;
3347 }
3348 print(f, "\n");
3349 }
3350
3351 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3352 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3353 {
3354 ObjectClass *class_a = (ObjectClass *)a;
3355 ObjectClass *class_b = (ObjectClass *)b;
3356 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3357 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3358 char *name_a, *name_b;
3359 int ret;
3360
3361 if (cc_a->ordering != cc_b->ordering) {
3362 ret = cc_a->ordering - cc_b->ordering;
3363 } else {
3364 name_a = x86_cpu_class_get_model_name(cc_a);
3365 name_b = x86_cpu_class_get_model_name(cc_b);
3366 ret = strcmp(name_a, name_b);
3367 g_free(name_a);
3368 g_free(name_b);
3369 }
3370 return ret;
3371 }
3372
3373 static GSList *get_sorted_cpu_model_list(void)
3374 {
3375 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3376 list = g_slist_sort(list, x86_cpu_list_compare);
3377 return list;
3378 }
3379
3380 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3381 {
3382 ObjectClass *oc = data;
3383 X86CPUClass *cc = X86_CPU_CLASS(oc);
3384 CPUListState *s = user_data;
3385 char *name = x86_cpu_class_get_model_name(cc);
3386 const char *desc = cc->model_description;
3387 if (!desc && cc->cpu_def) {
3388 desc = cc->cpu_def->model_id;
3389 }
3390
3391 (*s->cpu_fprintf)(s->file, "x86 %-20s %-48s\n",
3392 name, desc);
3393 g_free(name);
3394 }
3395
3396 /* list available CPU models and flags */
3397 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3398 {
3399 int i, j;
3400 CPUListState s = {
3401 .file = f,
3402 .cpu_fprintf = cpu_fprintf,
3403 };
3404 GSList *list;
3405 GList *names = NULL;
3406
3407 (*cpu_fprintf)(f, "Available CPUs:\n");
3408 list = get_sorted_cpu_model_list();
3409 g_slist_foreach(list, x86_cpu_list_entry, &s);
3410 g_slist_free(list);
3411
3412 names = NULL;
3413 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3414 FeatureWordInfo *fw = &feature_word_info[i];
3415 for (j = 0; j < 32; j++) {
3416 if (fw->feat_names[j]) {
3417 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3418 }
3419 }
3420 }
3421
3422 names = g_list_sort(names, (GCompareFunc)strcmp);
3423
3424 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
3425 listflags(f, cpu_fprintf, names);
3426 (*cpu_fprintf)(f, "\n");
3427 g_list_free(names);
3428 }
3429
3430 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3431 {
3432 ObjectClass *oc = data;
3433 X86CPUClass *cc = X86_CPU_CLASS(oc);
3434 CpuDefinitionInfoList **cpu_list = user_data;
3435 CpuDefinitionInfoList *entry;
3436 CpuDefinitionInfo *info;
3437
3438 info = g_malloc0(sizeof(*info));
3439 info->name = x86_cpu_class_get_model_name(cc);
3440 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3441 info->has_unavailable_features = true;
3442 info->q_typename = g_strdup(object_class_get_name(oc));
3443 info->migration_safe = cc->migration_safe;
3444 info->has_migration_safe = true;
3445 info->q_static = cc->static_model;
3446
3447 entry = g_malloc0(sizeof(*entry));
3448 entry->value = info;
3449 entry->next = *cpu_list;
3450 *cpu_list = entry;
3451 }
3452
3453 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
3454 {
3455 CpuDefinitionInfoList *cpu_list = NULL;
3456 GSList *list = get_sorted_cpu_model_list();
3457 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3458 g_slist_free(list);
3459 return cpu_list;
3460 }
3461
/* Return the bits of feature word @w that the current accelerator
 * supports.  With no accelerator selected, everything (~0) is reported
 * as supported.  When @migratable_only is set, non-migratable bits are
 * masked out.
 */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                                    wi->cpuid_ecx,
                                                    wi->cpuid_reg);
    } else if (hvf_enabled()) {
        r = hvf_get_supported_cpuid(wi->cpuid_eax,
                                    wi->cpuid_ecx,
                                    wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
3486
3487 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3488 {
3489 FeatureWord w;
3490
3491 for (w = 0; w < FEATURE_WORDS; w++) {
3492 report_unavailable_features(w, cpu->filtered_features[w]);
3493 }
3494 }
3495
3496 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3497 {
3498 PropValue *pv;
3499 for (pv = props; pv->prop; pv++) {
3500 if (!pv->value) {
3501 continue;
3502 }
3503 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3504 &error_abort);
3505 }
3506 }
3507
/* Load data from X86CPUDefinition into a X86CPU object
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    /* feature words are copied directly, bypassing the property system */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            /* userspace irqchip: must not advertise x2apic by default */
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* always set the hypervisor-present CPUID bit */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
3569
3570 /* Return a QDict containing keys for all properties that can be included
3571 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3572 * must be included in the dictionary.
3573 */
3574 static QDict *x86_cpu_static_props(void)
3575 {
3576 FeatureWord w;
3577 int i;
3578 static const char *props[] = {
3579 "min-level",
3580 "min-xlevel",
3581 "family",
3582 "model",
3583 "stepping",
3584 "model-id",
3585 "vendor",
3586 "lmce",
3587 NULL,
3588 };
3589 static QDict *d;
3590
3591 if (d) {
3592 return d;
3593 }
3594
3595 d = qdict_new();
3596 for (i = 0; props[i]; i++) {
3597 qdict_put_null(d, props[i]);
3598 }
3599
3600 for (w = 0; w < FEATURE_WORDS; w++) {
3601 FeatureWordInfo *fi = &feature_word_info[w];
3602 int bit;
3603 for (bit = 0; bit < 32; bit++) {
3604 if (!fi->feat_names[bit]) {
3605 continue;
3606 }
3607 qdict_put_null(d, fi->feat_names[bit]);
3608 }
3609 }
3610
3611 return d;
3612 }
3613
3614 /* Add an entry to @props dict, with the value for property. */
3615 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3616 {
3617 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3618 &error_abort);
3619
3620 qdict_put_obj(props, prop, value);
3621 }
3622
3623 /* Convert CPU model data from X86CPU object to a property dictionary
3624 * that can recreate exactly the same CPU model.
3625 */
3626 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3627 {
3628 QDict *sprops = x86_cpu_static_props();
3629 const QDictEntry *e;
3630
3631 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3632 const char *prop = qdict_entry_key(e);
3633 x86_cpu_expand_prop(cpu, props, prop);
3634 }
3635 }
3636
3637 /* Convert CPU model data from X86CPU object to a property dictionary
3638 * that can recreate exactly the same CPU model, including every
3639 * writeable QOM property.
3640 */
3641 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3642 {
3643 ObjectPropertyIterator iter;
3644 ObjectProperty *prop;
3645
3646 object_property_iter_init(&iter, OBJECT(cpu));
3647 while ((prop = object_property_iter_next(&iter))) {
3648 /* skip read-only or write-only properties */
3649 if (!prop->get || !prop->set) {
3650 continue;
3651 }
3652
3653 /* "hotplugged" is the only property that is configurable
3654 * on the command-line but will be set differently on CPUs
3655 * created using "-cpu ... -smp ..." and by CPUs created
3656 * on the fly by x86_cpu_from_model() for querying. Skip it.
3657 */
3658 if (!strcmp(prop->name, "hotplugged")) {
3659 continue;
3660 }
3661 x86_cpu_expand_prop(cpu, props, prop->name);
3662 }
3663 }
3664
3665 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3666 {
3667 const QDictEntry *prop;
3668 Error *err = NULL;
3669
3670 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3671 object_property_set_qobject(obj, qdict_entry_value(prop),
3672 qdict_entry_key(prop), &err);
3673 if (err) {
3674 break;
3675 }
3676 }
3677
3678 error_propagate(errp, err);
3679 }
3680
/* Create X86CPU object according to model+props specification.
 *
 * Returns a new reference the caller must unref, or NULL on error
 * (the partially-built object is unref'd here in that case).
 */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        /* apply the caller-supplied property overrides */
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}
3715
3716 CpuModelExpansionInfo *
3717 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3718 CpuModelInfo *model,
3719 Error **errp)
3720 {
3721 X86CPU *xc = NULL;
3722 Error *err = NULL;
3723 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3724 QDict *props = NULL;
3725 const char *base_name;
3726
3727 xc = x86_cpu_from_model(model->name,
3728 model->has_props ?
3729 qobject_to(QDict, model->props) :
3730 NULL, &err);
3731 if (err) {
3732 goto out;
3733 }
3734
3735 props = qdict_new();
3736
3737 switch (type) {
3738 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3739 /* Static expansion will be based on "base" only */
3740 base_name = "base";
3741 x86_cpu_to_dict(xc, props);
3742 break;
3743 case CPU_MODEL_EXPANSION_TYPE_FULL:
3744 /* As we don't return every single property, full expansion needs
3745 * to keep the original model name+props, and add extra
3746 * properties on top of that.
3747 */
3748 base_name = model->name;
3749 x86_cpu_to_dict_full(xc, props);
3750 break;
3751 default:
3752 error_setg(&err, "Unsupportted expansion type");
3753 goto out;
3754 }
3755
3756 if (!props) {
3757 props = qdict_new();
3758 }
3759 x86_cpu_to_dict(xc, props);
3760
3761 ret->model = g_new0(CpuModelInfo, 1);
3762 ret->model->name = g_strdup(base_name);
3763 ret->model->props = QOBJECT(props);
3764 ret->model->has_props = true;
3765
3766 out:
3767 object_unref(OBJECT(xc));
3768 if (err) {
3769 error_propagate(errp, err);
3770 qapi_free_CpuModelExpansionInfo(ret);
3771 ret = NULL;
3772 }
3773 return ret;
3774 }
3775
3776 static gchar *x86_gdb_arch_name(CPUState *cs)
3777 {
3778 #ifdef TARGET_X86_64
3779 return g_strdup("i386:x86-64");
3780 #else
3781 return g_strdup("i386");
3782 #endif
3783 }
3784
3785 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3786 {
3787 X86CPUDefinition *cpudef = data;
3788 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3789
3790 xcc->cpu_def = cpudef;
3791 xcc->migration_safe = true;
3792 }
3793
3794 static void x86_register_cpudef_type(X86CPUDefinition *def)
3795 {
3796 char *typename = x86_cpu_type_name(def->name);
3797 TypeInfo ti = {
3798 .name = typename,
3799 .parent = TYPE_X86_CPU,
3800 .class_init = x86_cpu_cpudef_class_init,
3801 .class_data = def,
3802 };
3803
3804 /* AMD aliases are handled at runtime based on CPUID vendor, so
3805 * they shouldn't be set on the CPU model table.
3806 */
3807 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3808 /* catch mistakes instead of silently truncating model_id when too long */
3809 assert(def->model_id && strlen(def->model_id) <= 48);
3810
3811
3812 type_register(&ti);
3813 g_free(typename);
3814 }
3815
3816 #if !defined(CONFIG_USER_ONLY)
3817
/* Clear the APIC feature bit in CPUID[1].EDX for this CPU. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
3822
3823 #endif /* !CONFIG_USER_ONLY */
3824
/* Emulate the CPUID instruction: fill *eax..*edx for leaf 'index',
 * sub-leaf 'count', from the configured feature words and topology.
 *
 * Leaves above the per-range maximum are clamped to cpuid_level, the
 * behavior documented for real Intel hardware (SDM Vol. 2A, CPUID).
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;            /* VIA/Centaur range */
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;             /* extended range */
    } else if (index >= 0x40000000) {
        limit = 0x40000001;                    /* hypervisor range */
    } else {
        limit = env->cpuid_level;              /* basic range */
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string and highest basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, feature flags, topology hints */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE mirrors CR4.OSXSAVE, so compute it dynamically */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
        }
        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) <<  8) |
               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
            *eax &= ~0xFC000000;
            if ((*eax & 31) && cs->nr_cores > 1) {
                *eax |= (cs->nr_cores - 1) << 26;
            }
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 1: /* L1 icache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
                                    1, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 2: /* L2 cache info */
                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
                                    cs->nr_threads, cs->nr_cores,
                                    eax, ebx, ecx, edx);
                break;
            case 3: /* L3 cache info */
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                if (cpu->enable_l3_cache) {
                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
                                        (1 << pkg_offset), cs->nr_cores,
                                        eax, ebx, ecx, edx);
                    break;
                }
                /* fall through */
            default: /* end of info */
                *eax = *ebx = *ecx = *edx = 0;
                break;
            }
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors CR4.PKE, so compute it dynamically */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else if (hvf_enabled() && cpu->enable_pmu) {
            *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
            *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
            *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
            *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component sub-leaf: size and offset of the save area */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x14: {
        /* Intel Processor Trace Enumeration */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
            !kvm_enabled()) {
            break;
        }

        if (count == 0) {
            *eax = INTEL_PT_MAX_SUBLEAF;
            *ebx = INTEL_PT_MINIMAL_EBX;
            *ecx = INTEL_PT_MINIMAL_ECX;
        } else if (count == 1) {
            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Highest extended leaf plus vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
                                   cpu->enable_l3_cache ?
                                   env->cache_info_amd.l3_cache : NULL,
                                   ecx, edx);
        break;
    case 0x80000007:
        /* Advanced Power Management (invariant TSC etc.) */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if  (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = env->features[FEAT_8000_0008_EBX];
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        /* SVM information leaf */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x8000001D:
        /* AMD cache topology, sub-leaf per cache level */
        *eax = 0;
        switch (count) {
        case 0: /* L1 dcache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 1: /* L1 icache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 2: /* L2 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        case 3: /* L3 cache info */
            encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
                                       eax, ebx, ecx, edx);
            break;
        default: /* end of info */
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }
        break;
    case 0x8000001E:
        /* AMD processor topology (core/node IDs) */
        assert(cpu->core_id <= 255);
        encode_topo_cpuid8000001e(cs, cpu,
                                  eax, ebx, ecx, edx);
        break;
    case 0xC0000000:
        /* Highest Centaur/VIA extended leaf */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000001F:
        /* AMD SEV: encrypted memory capabilities */
        *eax = sev_enabled() ? 0x2 : 0;
        *ebx = sev_get_cbit_position();
        *ebx |= sev_get_reduced_phys_bits() << 6;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
4264
/* CPUClass::reset()
 *
 * Put the CPU into its architectural power-on/RESET state: real mode
 * with CS:IP = F000:FFF0, FPU/SSE defaults, cleared MTRRs and debug
 * registers.  Under CONFIG_USER_ONLY, XCR0/CR4 are additionally set up
 * so all configured XSAVE features are usable without an OS.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Fields past end_reset_fields (e.g. feature words) survive reset. */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 makes the first fetch hit the reset vector
     * near the top of the 4G address space.
     */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     * - IA32_MTRR_DEF_TYPE MSR.E = 0
     * - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined. For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_injected = -1;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}
4393
4394 #ifndef CONFIG_USER_ONLY
/* Return true if this CPU is the bootstrap processor, as indicated by
 * the BSP bit of its IA32_APICBASE MSR.
 */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
4399
4400 /* TODO: remove me, when reset over QOM tree is implemented */
4401 static void x86_cpu_machine_reset_cb(void *opaque)
4402 {
4403 X86CPU *cpu = opaque;
4404 cpu_reset(CPU(cpu));
4405 }
4406 #endif
4407
4408 static void mce_init(X86CPU *cpu)
4409 {
4410 CPUX86State *cenv = &cpu->env;
4411 unsigned int bank;
4412
4413 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4414 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4415 (CPUID_MCE | CPUID_MCA)) {
4416 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4417 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4418 cenv->mcg_ctl = ~(uint64_t)0;
4419 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4420 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4421 }
4422 }
4423 }
4424
4425 #ifndef CONFIG_USER_ONLY
4426 APICCommonClass *apic_get_class(void)
4427 {
4428 const char *apic_type = "apic";
4429
4430 /* TODO: in-kernel irqchip for hvf */
4431 if (kvm_apic_in_kernel()) {
4432 apic_type = "kvm-apic";
4433 } else if (xen_enabled()) {
4434 apic_type = "xen-apic";
4435 }
4436
4437 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4438 }
4439
/* Create (but do not realize) the per-CPU local APIC device, of the
 * class chosen by apic_get_class(), and wire it to this CPU.
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* The child property now holds a reference; drop the one taken by
     * object_new() so the CPU becomes the sole owner.
     */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
4457
/* Realize the CPU's APIC device and, once per machine, map the shared
 * APIC MMIO window into the system address space.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once; /* MMIO window is shared by all CPUs */

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
4480
4481 static void x86_cpu_machine_done(Notifier *n, void *unused)
4482 {
4483 X86CPU *cpu = container_of(n, X86CPU, machine_done);
4484 MemoryRegion *smram =
4485 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4486
4487 if (smram) {
4488 cpu->smram = g_new(MemoryRegion, 1);
4489 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4490 smram, 0, 1ull << 32);
4491 memory_region_set_enabled(cpu->smram, true);
4492 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
4493 }
4494 }
4495 #else
/* User-mode emulation has no APIC; nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
4499 #endif
4500
/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax < 0x80000008) {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        return 36;
    }

    host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
    /* Note: According to AMD doc 25481 rev 2.34 they have a field
     * at 23:16 that can specify a maximum physical address bits for
     * the guest that can override this value; but I've not seen
     * anything with that set.
     */
    return eax & 0xff;
}
4526
4527 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4528 {
4529 if (*min < value) {
4530 *min = value;
4531 }
4532 }
4533
4534 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4535 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4536 {
4537 CPUX86State *env = &cpu->env;
4538 FeatureWordInfo *fi = &feature_word_info[w];
4539 uint32_t eax = fi->cpuid_eax;
4540 uint32_t region = eax & 0xF0000000;
4541
4542 if (!env->features[w]) {
4543 return;
4544 }
4545
4546 switch (region) {
4547 case 0x00000000:
4548 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4549 break;
4550 case 0x80000000:
4551 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4552 break;
4553 case 0xC0000000:
4554 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4555 break;
4556 }
4557 }
4558
4559 /* Calculate XSAVE components based on the configured CPU feature flags */
4560 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4561 {
4562 CPUX86State *env = &cpu->env;
4563 int i;
4564 uint64_t mask;
4565
4566 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4567 return;
4568 }
4569
4570 mask = 0;
4571 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4572 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4573 if (env->features[esa->feature] & esa->bits) {
4574 mask |= (1ULL << i);
4575 }
4576 }
4577
4578 env->features[FEAT_XSAVE_COMP_LO] = mask;
4579 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4580 }
4581
4582 /***** Steps involved on loading and filtering CPUID data
4583 *
4584 * When initializing and realizing a CPU object, the steps
4585 * involved in setting up CPUID data are:
4586 *
4587 * 1) Loading CPU model definition (X86CPUDefinition). This is
4588 * implemented by x86_cpu_load_def() and should be completely
4589 * transparent, as it is done automatically by instance_init.
4590 * No code should need to look at X86CPUDefinition structs
4591 * outside instance_init.
4592 *
4593 * 2) CPU expansion. This is done by realize before CPUID
4594 * filtering, and will make sure host/accelerator data is
4595 * loaded for CPU models that depend on host capabilities
4596 * (e.g. "host"). Done by x86_cpu_expand_features().
4597 *
4598 * 3) CPUID filtering. This initializes extra data related to
4599 * CPUID, and checks if the host supports all capabilities
4600 * required by the CPU. Runnability of a CPU model is
4601 * determined at this step. Done by x86_cpu_filter_features().
4602 *
4603 * Some operations don't require all steps to be performed.
4604 * More precisely:
4605 *
4606 * - CPU instance creation (instance_init) will run only CPU
4607 * model loading. CPU expansion can't run at instance_init-time
4608 * because host/accelerator data may be not available yet.
4609 * - CPU realization will perform both CPU model expansion and CPUID
4610 * filtering, and return an error in case one of them fails.
4611 * - query-cpu-definitions needs to run all 3 steps. It needs
4612 * to run CPUID filtering, as the 'unavailable-features'
4613 * field is set based on the filtering results.
4614 * - The query-cpu-model-expansion QMP command only needs to run
4615 * CPU model loading and CPU expansion. It should not filter
4616 * any CPUID data based on host capabilities.
4617 */
4618
4619 /* Expand CPU configuration data, based on configured features
4620 * and host/accelerator capabilities when appropriate.
4621 */
4622 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4623 {
4624 CPUX86State *env = &cpu->env;
4625 FeatureWord w;
4626 GList *l;
4627 Error *local_err = NULL;
4628
4629 /*TODO: Now cpu->max_features doesn't overwrite features
4630 * set using QOM properties, and we can convert
4631 * plus_features & minus_features to global properties
4632 * inside x86_cpu_parse_featurestr() too.
4633 */
4634 if (cpu->max_features) {
4635 for (w = 0; w < FEATURE_WORDS; w++) {
4636 /* Override only features that weren't set explicitly
4637 * by the user.
4638 */
4639 env->features[w] |=
4640 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4641 ~env->user_features[w] & \
4642 ~feature_word_info[w].no_autoenable_flags;
4643 }
4644 }
4645
4646 for (l = plus_features; l; l = l->next) {
4647 const char *prop = l->data;
4648 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4649 if (local_err) {
4650 goto out;
4651 }
4652 }
4653
4654 for (l = minus_features; l; l = l->next) {
4655 const char *prop = l->data;
4656 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4657 if (local_err) {
4658 goto out;
4659 }
4660 }
4661
4662 if (!kvm_enabled() || !cpu->expose_kvm) {
4663 env->features[FEAT_KVM] = 0;
4664 }
4665
4666 x86_cpu_enable_xsave_components(cpu);
4667
4668 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
4669 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4670 if (cpu->full_cpuid_auto_level) {
4671 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4672 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4673 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4674 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4675 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4676 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4677 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4678 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4679 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4680 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4681 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4682 /* SVM requires CPUID[0x8000000A] */
4683 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4684 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4685 }
4686
4687 /* SEV requires CPUID[0x8000001F] */
4688 if (sev_enabled()) {
4689 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4690 }
4691 }
4692
4693 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4694 if (env->cpuid_level == UINT32_MAX) {
4695 env->cpuid_level = env->cpuid_min_level;
4696 }
4697 if (env->cpuid_xlevel == UINT32_MAX) {
4698 env->cpuid_xlevel = env->cpuid_min_xlevel;
4699 }
4700 if (env->cpuid_xlevel2 == UINT32_MAX) {
4701 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4702 }
4703
4704 out:
4705 if (local_err != NULL) {
4706 error_propagate(errp, local_err);
4707 }
4708 }
4709
/*
 * Finishes initialization of CPUID data, filters CPU feature
 * words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        /* Drop unsupported bits; remember them in filtered_features
         * so they can be reported to the user later.
         */
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    /* Intel PT is all-or-nothing: verify KVM reports at least the fixed
     * capability set that cpu_x86_cpuid() advertises for leaf 0x14.
     */
    if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
        kvm_enabled()) {
        KVMState *s = CPU(cpu)->kvm_state;
        uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
        uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
        uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
        uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
        uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);

        if (!eax_0 ||
            ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
            ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
            ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
            ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
             INTEL_PT_ADDR_RANGES_NUM) ||
            ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
             (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
            (ecx_0 & INTEL_PT_IP_LIP)) {
            /*
             * Processor Trace capabilities aren't configurable, so if the
             * host can't emulate the capabilities we report on
             * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
             */
            env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
            cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
            rv = 1;
        }
    }

    return rv;
}
4764
/* True if the configured CPUID vendor strings spell "GenuineIntel". */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
/* True if the configured CPUID vendor strings spell "AuthenticAMD". */
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
4771 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4772 {
4773 CPUState *cs = CPU(dev);
4774 X86CPU *cpu = X86_CPU(dev);
4775 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4776 CPUX86State *env = &cpu->env;
4777 Error *local_err = NULL;
4778 static bool ht_warned;
4779
4780 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4781 char *name = x86_cpu_class_get_model_name(xcc);
4782 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4783 g_free(name);
4784 goto out;
4785 }
4786
4787 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4788 error_setg(errp, "apic-id property was not initialized properly");
4789 return;
4790 }
4791
4792 x86_cpu_expand_features(cpu, &local_err);
4793 if (local_err) {
4794 goto out;
4795 }
4796
4797 if (x86_cpu_filter_features(cpu) &&
4798 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4799 x86_cpu_report_filtered_features(cpu);
4800 if (cpu->enforce_cpuid) {
4801 error_setg(&local_err,
4802 accel_uses_host_cpuid() ?
4803 "Host doesn't support requested features" :
4804 "TCG doesn't support requested features");
4805 goto out;
4806 }
4807 }
4808
4809 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4810 * CPUID[1].EDX.
4811 */
4812 if (IS_AMD_CPU(env)) {
4813 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4814 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4815 & CPUID_EXT2_AMD_ALIASES);
4816 }
4817
4818 /* For 64bit systems think about the number of physical bits to present.
4819 * ideally this should be the same as the host; anything other than matching
4820 * the host can cause incorrect guest behaviour.
4821 * QEMU used to pick the magic value of 40 bits that corresponds to
4822 * consumer AMD devices but nothing else.
4823 */
4824 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4825 if (accel_uses_host_cpuid()) {
4826 uint32_t host_phys_bits = x86_host_phys_bits();
4827 static bool warned;
4828
4829 if (cpu->host_phys_bits) {
4830 /* The user asked for us to use the host physical bits */
4831 cpu->phys_bits = host_phys_bits;
4832 }
4833
4834 /* Print a warning if the user set it to a value that's not the
4835 * host value.
4836 */
4837 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4838 !warned) {
4839 warn_report("Host physical bits (%u)"
4840 " does not match phys-bits property (%u)",
4841 host_phys_bits, cpu->phys_bits);
4842 warned = true;
4843 }
4844
4845 if (cpu->phys_bits &&
4846 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4847 cpu->phys_bits < 32)) {
4848 error_setg(errp, "phys-bits should be between 32 and %u "
4849 " (but is %u)",
4850 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
4851 return;
4852 }
4853 } else {
4854 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4855 error_setg(errp, "TCG only supports phys-bits=%u",
4856 TCG_PHYS_ADDR_BITS);
4857 return;
4858 }
4859 }
4860 /* 0 means it was not explicitly set by the user (or by machine
4861 * compat_props or by the host code above). In this case, the default
4862 * is the value used by TCG (40).
4863 */
4864 if (cpu->phys_bits == 0) {
4865 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4866 }
4867 } else {
4868 /* For 32 bit systems don't use the user set value, but keep
4869 * phys_bits consistent with what we tell the guest.
4870 */
4871 if (cpu->phys_bits != 0) {
4872 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
4873 return;
4874 }
4875
4876 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4877 cpu->phys_bits = 36;
4878 } else {
4879 cpu->phys_bits = 32;
4880 }
4881 }
4882
4883 /* Cache information initialization */
4884 if (!cpu->legacy_cache) {
4885 if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
4886 char *name = x86_cpu_class_get_model_name(xcc);
4887 error_setg(errp,
4888 "CPU model '%s' doesn't support legacy-cache=off", name);
4889 g_free(name);
4890 return;
4891 }
4892 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
4893 *xcc->cpu_def->cache_info;
4894 } else {
4895 /* Build legacy cache information */
4896 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
4897 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
4898 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
4899 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
4900
4901 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
4902 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
4903 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
4904 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
4905
4906 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
4907 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
4908 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
4909 env->cache_info_amd.l3_cache = &legacy_l3_cache;
4910 }
4911
4912
4913 cpu_exec_realizefn(cs, &local_err);
4914 if (local_err != NULL) {
4915 error_propagate(errp, local_err);
4916 return;
4917 }
4918
4919 #ifndef CONFIG_USER_ONLY
4920 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
4921
4922 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4923 x86_cpu_apic_create(cpu, &local_err);
4924 if (local_err != NULL) {
4925 goto out;
4926 }
4927 }
4928 #endif
4929
4930 mce_init(cpu);
4931
4932 #ifndef CONFIG_USER_ONLY
4933 if (tcg_enabled()) {
4934 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4935 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4936
4937 /* Outer container... */
4938 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4939 memory_region_set_enabled(cpu->cpu_as_root, true);
4940
4941 /* ... with two regions inside: normal system memory with low
4942 * priority, and...
4943 */
4944 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4945 get_system_memory(), 0, ~0ull);
4946 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4947 memory_region_set_enabled(cpu->cpu_as_mem, true);
4948
4949 cs->num_ases = 2;
4950 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4951 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4952
4953 /* ... SMRAM with higher priority, linked from /machine/smram. */
4954 cpu->machine_done.notify = x86_cpu_machine_done;
4955 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4956 }
4957 #endif
4958
4959 qemu_init_vcpu(cs);
4960
4961 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
4962 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
4963 * based on inputs (sockets,cores,threads), it is still better to gives
4964 * users a warning.
4965 *
4966 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
4967 * cs->nr_threads hasn't be populated yet and the checking is incorrect.
4968 */
4969 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
4970 error_report("AMD CPU doesn't support hyperthreading. Please configure"
4971 " -smp options properly.");
4972 ht_warned = true;
4973 }
4974
4975 x86_cpu_apic_realize(cpu, &local_err);
4976 if (local_err != NULL) {
4977 goto out;
4978 }
4979 cpu_reset(cs);
4980
4981 xcc->parent_realize(dev, &local_err);
4982
4983 out:
4984 if (local_err != NULL) {
4985 error_propagate(errp, local_err);
4986 return;
4987 }
4988 }
4989
4990 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
4991 {
4992 X86CPU *cpu = X86_CPU(dev);
4993 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4994 Error *local_err = NULL;
4995
4996 #ifndef CONFIG_USER_ONLY
4997 cpu_remove_sync(CPU(dev));
4998 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
4999 #endif
5000
5001 if (cpu->apic_state) {
5002 object_unparent(OBJECT(cpu->apic_state));
5003 cpu->apic_state = NULL;
5004 }
5005
5006 xcc->parent_unrealize(dev, &local_err);
5007 if (local_err != NULL) {
5008 error_propagate(errp, local_err);
5009 return;
5010 }
5011 }
5012
/* Opaque state for a per-feature-bit boolean QOM property */
typedef struct BitProperty {
    FeatureWord w;      /* feature word the property controls */
    uint32_t mask;      /* bit(s) within that word covered by this property */
} BitProperty;
5017
5018 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5019 void *opaque, Error **errp)
5020 {
5021 X86CPU *cpu = X86_CPU(obj);
5022 BitProperty *fp = opaque;
5023 uint32_t f = cpu->env.features[fp->w];
5024 bool value = (f & fp->mask) == fp->mask;
5025 visit_type_bool(v, name, &value, errp);
5026 }
5027
5028 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5029 void *opaque, Error **errp)
5030 {
5031 DeviceState *dev = DEVICE(obj);
5032 X86CPU *cpu = X86_CPU(obj);
5033 BitProperty *fp = opaque;
5034 Error *local_err = NULL;
5035 bool value;
5036
5037 if (dev->realized) {
5038 qdev_prop_set_after_realize(dev, name, errp);
5039 return;
5040 }
5041
5042 visit_type_bool(v, name, &value, &local_err);
5043 if (local_err) {
5044 error_propagate(errp, local_err);
5045 return;
5046 }
5047
5048 if (value) {
5049 cpu->env.features[fp->w] |= fp->mask;
5050 } else {
5051 cpu->env.features[fp->w] &= ~fp->mask;
5052 }
5053 cpu->env.user_features[fp->w] |= fp->mask;
5054 }
5055
5056 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5057 void *opaque)
5058 {
5059 BitProperty *prop = opaque;
5060 g_free(prop);
5061 }
5062
5063 /* Register a boolean property to get/set a single bit in a uint32_t field.
5064 *
5065 * The same property name can be registered multiple times to make it affect
5066 * multiple bits in the same FeatureWord. In that case, the getter will return
5067 * true only if all bits are set.
5068 */
5069 static void x86_cpu_register_bit_prop(X86CPU *cpu,
5070 const char *prop_name,
5071 FeatureWord w,
5072 int bitnr)
5073 {
5074 BitProperty *fp;
5075 ObjectProperty *op;
5076 uint32_t mask = (1UL << bitnr);
5077
5078 op = object_property_find(OBJECT(cpu), prop_name, NULL);
5079 if (op) {
5080 fp = op->opaque;
5081 assert(fp->w == w);
5082 fp->mask |= mask;
5083 } else {
5084 fp = g_new0(BitProperty, 1);
5085 fp->w = w;
5086 fp->mask = mask;
5087 object_property_add(OBJECT(cpu), prop_name, "bool",
5088 x86_cpu_get_bit_prop,
5089 x86_cpu_set_bit_prop,
5090 x86_cpu_release_bit_prop, fp, &error_abort);
5091 }
5092 }
5093
5094 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5095 FeatureWord w,
5096 int bitnr)
5097 {
5098 FeatureWordInfo *fi = &feature_word_info[w];
5099 const char *name = fi->feat_names[bitnr];
5100
5101 if (!name) {
5102 return;
5103 }
5104
5105 /* Property names should use "-" instead of "_".
5106 * Old names containing underscores are registered as aliases
5107 * using object_property_add_alias()
5108 */
5109 assert(!strchr(name, '_'));
5110 /* aliases don't use "|" delimiters anymore, they are registered
5111 * manually using object_property_add_alias() */
5112 assert(!strchr(name, '|'));
5113 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5114 }
5115
5116 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5117 {
5118 X86CPU *cpu = X86_CPU(cs);
5119 CPUX86State *env = &cpu->env;
5120 GuestPanicInformation *panic_info = NULL;
5121
5122 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5123 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5124
5125 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
5126
5127 assert(HV_CRASH_PARAMS >= 5);
5128 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5129 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5130 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5131 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5132 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5133 }
5134
5135 return panic_info;
5136 }
5137 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5138 const char *name, void *opaque,
5139 Error **errp)
5140 {
5141 CPUState *cs = CPU(obj);
5142 GuestPanicInformation *panic_info;
5143
5144 if (!cs->crash_occurred) {
5145 error_setg(errp, "No crash occured");
5146 return;
5147 }
5148
5149 panic_info = x86_cpu_get_crash_info(cs);
5150 if (panic_info == NULL) {
5151 error_setg(errp, "No crash information");
5152 return;
5153 }
5154
5155 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5156 errp);
5157 qapi_free_GuestPanicInformation(panic_info);
5158 }
5159
/* QOM instance_init for TYPE_X86_CPU: wires env_ptr, registers the CPUID
 * version/vendor/feature properties (including one bool per known feature
 * bit) plus legacy-name aliases, and loads the class's CPU model defaults.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    /* CPUID version fields and identification strings */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only views of the enabled and filtered feature words */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Alternate spellings kept for compatibility with older command lines */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Underscore spellings aliased to the canonical "-" property names */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    /* Apply the model definition's defaults, if this class has one */
    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
5242
5243 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5244 {
5245 X86CPU *cpu = X86_CPU(cs);
5246
5247 return cpu->apic_id;
5248 }
5249
5250 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5251 {
5252 X86CPU *cpu = X86_CPU(cs);
5253
5254 return cpu->env.cr[0] & CR0_PG_MASK;
5255 }
5256
5257 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5258 {
5259 X86CPU *cpu = X86_CPU(cs);
5260
5261 cpu->env.eip = value;
5262 }
5263
5264 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5265 {
5266 X86CPU *cpu = X86_CPU(cs);
5267
5268 cpu->env.eip = tb->pc - tb->cs_base;
5269 }
5270
5271 static bool x86_cpu_has_work(CPUState *cs)
5272 {
5273 X86CPU *cpu = X86_CPU(cs);
5274 CPUX86State *env = &cpu->env;
5275
5276 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
5277 CPU_INTERRUPT_POLL)) &&
5278 (env->eflags & IF_MASK)) ||
5279 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
5280 CPU_INTERRUPT_INIT |
5281 CPU_INTERRUPT_SIPI |
5282 CPU_INTERRUPT_MCE)) ||
5283 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
5284 !(env->hflags & HF_SMM_MASK));
5285 }
5286
5287 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5288 {
5289 X86CPU *cpu = X86_CPU(cs);
5290 CPUX86State *env = &cpu->env;
5291
5292 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5293 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5294 : bfd_mach_i386_i8086);
5295 info->print_insn = print_insn_i386;
5296
5297 info->cap_arch = CS_ARCH_X86;
5298 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5299 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5300 : CS_MODE_16);
5301 info->cap_insn_unit = 1;
5302 info->cap_insn_split = 8;
5303 }
5304
/* Recompute the derived bits of env->hflags from the segment registers,
 * control registers, EFLAGS and EFER.
 */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
    /* Bits excluded from this mask are recomputed from scratch below */
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    hflags = env->hflags & HFLAG_COPY_MASK;
    /* CPL is taken from the DPL field of the current stack segment */
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    /* CR0.PE -> HF_PE */
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    /* CR0.MP/EM/TS shifted into the corresponding hflag positions */
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    /* TF, VM and IOPL are mirrored directly from EFLAGS */
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        /* 64-bit code segment: implies 32-bit stack/code size flags too */
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        /* Derive CS32/SS32 from the descriptors' default-size (B) bits */
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                    (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            /* Real mode, vm86 mode or 16-bit code: always add seg bases */
            hflags |= HF_ADDSEG_MASK;
        } else {
            /* Otherwise only when a relevant segment base is non-zero */
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}
5346
/* qdev properties common to every X86CPU subclass */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
    DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_END_OF_LIST()
};
5416
/* Class init shared by all x86 CPU types: wires the CPUClass/DeviceClass
 * virtual methods and installs the common qdev property list.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain our realize/unrealize around the parent DeviceClass hooks */
    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    /* Save the parent reset so x86_cpu_reset can call it */
    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* System-emulation-only hooks: memory mapping, ELF notes, migration */
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 57;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 41;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
#endif
    cc->disas_set_info = x86_disas_set_info;

    dc->user_creatable = true;
}
5480
/* Abstract base QOM type shared by all x86 CPU models */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
5490
5491
5492 /* "base" CPU model, used by query-cpu-model-expansion */
/* Class init for the "base" model: a static, migration-safe CPU type with
 * no features enabled, used by query-cpu-model-expansion.
 */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
    /* ordering affects where the model appears in listings */
    xcc->ordering = 8;
}
5502
/* QOM registration entry for the "base" CPU model */
static const TypeInfo x86_base_cpu_type_info = {
        .name = X86_CPU_TYPE_NAME("base"),
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_base_class_init,
};
5508
5509 static void x86_cpu_register_types(void)
5510 {
5511 int i;
5512
5513 type_register_static(&x86_cpu_type_info);
5514 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5515 x86_register_cpudef_type(&builtin_x86_defs[i]);
5516 }
5517 type_register_static(&max_x86_cpu_type_info);
5518 type_register_static(&x86_base_cpu_type_info);
5519 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5520 type_register_static(&host_x86_cpu_type_info);
5521 #endif
5522 }
5523
5524 type_init(x86_cpu_register_types)