mirror_qemu.git (git.proxmox.com) / target/i386/cpu.c
Commit: i386: Define AMD's no SSB mitigation needed.
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22
23 #include "cpu.h"
24 #include "exec/exec-all.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/hvf.h"
27 #include "sysemu/cpus.h"
28 #include "kvm_i386.h"
29 #include "sev_i386.h"
30
31 #include "qemu/error-report.h"
32 #include "qemu/option.h"
33 #include "qemu/config-file.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-visit-misc.h"
36 #include "qapi/qapi-visit-run-state.h"
37 #include "qapi/qmp/qdict.h"
38 #include "qapi/qmp/qerror.h"
39 #include "qapi/visitor.h"
40 #include "qom/qom-qobject.h"
41 #include "sysemu/arch_init.h"
42
43 #include "standard-headers/asm-x86/kvm_para.h"
44
45 #include "sysemu/sysemu.h"
46 #include "hw/qdev-properties.h"
47 #include "hw/i386/topology.h"
48 #ifndef CONFIG_USER_ONLY
49 #include "exec/address-spaces.h"
50 #include "hw/hw.h"
51 #include "hw/xen/xen.h"
52 #include "hw/i386/apic_internal.h"
53 #endif
54
55 #include "disas/capstone.h"
56
57 /* Helpers for building CPUID[2] descriptors: */
58
59 struct CPUID2CacheDescriptorInfo {
60 enum CacheType type;
61 int level;
62 int size;
63 int line_size;
64 int associativity;
65 };
66
67 #define KiB 1024
68 #define MiB (1024 * 1024)
69
70 /*
71 * Known CPUID 2 cache descriptors.
72 * From Intel SDM Volume 2A, CPUID instruction
73 */
74 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
75 [0x06] = { .level = 1, .type = ICACHE, .size = 8 * KiB,
76 .associativity = 4, .line_size = 32, },
77 [0x08] = { .level = 1, .type = ICACHE, .size = 16 * KiB,
78 .associativity = 4, .line_size = 32, },
79 [0x09] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
80 .associativity = 4, .line_size = 64, },
81 [0x0A] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
82 .associativity = 2, .line_size = 32, },
83 [0x0C] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
84 .associativity = 4, .line_size = 32, },
85 [0x0D] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
86 .associativity = 4, .line_size = 64, },
87 [0x0E] = { .level = 1, .type = DCACHE, .size = 24 * KiB,
88 .associativity = 6, .line_size = 64, },
89 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
90 .associativity = 2, .line_size = 64, },
91 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
92 .associativity = 8, .line_size = 64, },
93 /* lines per sector is not supported by cpuid2_cache_descriptor(),
94 * so descriptors 0x22, 0x23 are not included
95 */
96 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
97 .associativity = 16, .line_size = 64, },
98 /* lines per sector is not supported by cpuid2_cache_descriptor(),
99 * so descriptors 0x25, 0x20 are not included
100 */
101 [0x2C] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
102 .associativity = 8, .line_size = 64, },
103 [0x30] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
104 .associativity = 8, .line_size = 64, },
105 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
106 .associativity = 4, .line_size = 32, },
107 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
108 .associativity = 4, .line_size = 32, },
109 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
112 .associativity = 4, .line_size = 32, },
113 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
114 .associativity = 4, .line_size = 32, },
115 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
116 .associativity = 4, .line_size = 64, },
117 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
118 .associativity = 8, .line_size = 64, },
119 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
120 .associativity = 12, .line_size = 64, },
121 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
122 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
123 .associativity = 12, .line_size = 64, },
124 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
125 .associativity = 16, .line_size = 64, },
126 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
131 .associativity = 24, .line_size = 64, },
132 [0x60] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
133 .associativity = 8, .line_size = 64, },
134 [0x66] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
135 .associativity = 4, .line_size = 64, },
136 [0x67] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
137 .associativity = 4, .line_size = 64, },
138 [0x68] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
141 .associativity = 4, .line_size = 64, },
142 /* lines per sector is not supported by cpuid2_cache_descriptor(),
143 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
144 */
145 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
146 .associativity = 8, .line_size = 64, },
147 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
148 .associativity = 2, .line_size = 64, },
149 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
150 .associativity = 8, .line_size = 64, },
151 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
152 .associativity = 8, .line_size = 32, },
153 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 32, },
155 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
156 .associativity = 8, .line_size = 32, },
157 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
158 .associativity = 8, .line_size = 32, },
159 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
160 .associativity = 4, .line_size = 64, },
161 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
162 .associativity = 8, .line_size = 64, },
163 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 4, .line_size = 64, },
167 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 8, .line_size = 64, },
171 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 8, .line_size = 64, },
173 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
176 .associativity = 12, .line_size = 64, },
177 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
178 .associativity = 12, .line_size = 64, },
179 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
182 .associativity = 16, .line_size = 64, },
183 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
184 .associativity = 16, .line_size = 64, },
185 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
188 .associativity = 24, .line_size = 64, },
189 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
190 .associativity = 24, .line_size = 64, },
191 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
192 .associativity = 24, .line_size = 64, },
193 };
194
195 /*
196 * "CPUID leaf 2 does not report cache descriptor information,
197 * use CPUID leaf 4 to query cache parameters"
198 */
199 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
200
201 /*
202 * Return a CPUID 2 cache descriptor for a given cache.
203 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
204 */
205 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
206 {
207 int i;
208
209 assert(cache->size > 0);
210 assert(cache->level > 0);
211 assert(cache->line_size > 0);
212 assert(cache->associativity > 0);
213 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
214 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
215 if (d->level == cache->level && d->type == cache->type &&
216 d->size == cache->size && d->line_size == cache->line_size &&
217 d->associativity == cache->associativity) {
218 return i;
219 }
220 }
221
222 return CACHE_DESCRIPTOR_UNAVAILABLE;
223 }
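/*
 * Illustrative sketch (not part of upstream QEMU): how a caller is expected
 * to use cpuid2_cache_descriptor().  A 32 KiB, 8-way, 64-byte-line L1 data
 * cache matches descriptor 0x2C in the table above; a geometry with no table
 * entry falls back to CACHE_DESCRIPTOR_UNAVAILABLE (0xFF).
 */
#if 0
static uint8_t example_l1d_descriptor(void)
{
    CPUCacheInfo l1d = {
        .type = DCACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
    };

    /* Returns 0x2C for this geometry, 0xFF for anything not in the table */
    return cpuid2_cache_descriptor(&l1d);
}
#endif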
224
225 /* CPUID Leaf 4 constants: */
226
227 /* EAX: */
228 #define CACHE_TYPE_D 1
229 #define CACHE_TYPE_I 2
230 #define CACHE_TYPE_UNIFIED 3
231
232 #define CACHE_LEVEL(l) (l << 5)
233
234 #define CACHE_SELF_INIT_LEVEL (1 << 8)
235
236 /* EDX: */
237 #define CACHE_NO_INVD_SHARING (1 << 0)
238 #define CACHE_INCLUSIVE (1 << 1)
239 #define CACHE_COMPLEX_IDX (1 << 2)
240
241 /* Encode CacheType for CPUID[4].EAX */
242 #define CACHE_TYPE(t) (((t) == DCACHE) ? CACHE_TYPE_D : \
243 ((t) == ICACHE) ? CACHE_TYPE_I : \
244 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
245 0 /* Invalid value */)
246
247
248 /* Encode cache info for CPUID[4] */
249 static void encode_cache_cpuid4(CPUCacheInfo *cache,
250 int num_apic_ids, int num_cores,
251 uint32_t *eax, uint32_t *ebx,
252 uint32_t *ecx, uint32_t *edx)
253 {
254 assert(cache->size == cache->line_size * cache->associativity *
255 cache->partitions * cache->sets);
256
257 assert(num_apic_ids > 0);
258 *eax = CACHE_TYPE(cache->type) |
259 CACHE_LEVEL(cache->level) |
260 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
261 ((num_cores - 1) << 26) |
262 ((num_apic_ids - 1) << 14);
263
264 assert(cache->line_size > 0);
265 assert(cache->partitions > 0);
266 assert(cache->associativity > 0);
267 /* We don't implement fully-associative caches */
268 assert(cache->associativity < cache->sets);
269 *ebx = (cache->line_size - 1) |
270 ((cache->partitions - 1) << 12) |
271 ((cache->associativity - 1) << 22);
272
273 assert(cache->sets > 0);
274 *ecx = cache->sets - 1;
275
276 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
277 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
278 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
279 }
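/*
 * Illustrative sketch (not part of upstream QEMU): recover the cache
 * geometry from the CPUID[4] register layout produced above.  For the
 * legacy_l1d_cache defined later in this file (64-byte lines, 1 partition,
 * 8-way, 64 sets) the encoded values are EBX = 0x01C0003F and ECX = 0x3F,
 * and the total size works out to 64 * 1 * 8 * 64 = 32 KiB.
 */
#if 0
static uint32_t example_cache_size_from_cpuid4(uint32_t ebx, uint32_t ecx)
{
    uint32_t line_size     = (ebx & 0xFFF) + 1;          /* bits 11:0  */
    uint32_t partitions    = ((ebx >> 12) & 0x3FF) + 1;  /* bits 21:12 */
    uint32_t associativity = ((ebx >> 22) & 0x3FF) + 1;  /* bits 31:22 */
    uint32_t sets          = ecx + 1;

    return line_size * partitions * associativity * sets;
}
#endif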
280
281 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
282 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
283 {
284 assert(cache->size % 1024 == 0);
285 assert(cache->lines_per_tag > 0);
286 assert(cache->associativity > 0);
287 assert(cache->line_size > 0);
288 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
289 (cache->lines_per_tag << 8) | (cache->line_size);
290 }
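/*
 * Worked example (illustrative, not part of upstream QEMU): for the
 * legacy_l1d_cache_amd values below (64 KiB, 2-way, 1 line per tag,
 * 64-byte lines) this packs as
 *   (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140,
 * following the CPUID[0x80000005] L1 cache layout.
 */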
291
292 #define ASSOC_FULL 0xFF
293
294 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
295 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
296 a == 2 ? 0x2 : \
297 a == 4 ? 0x4 : \
298 a == 8 ? 0x6 : \
299 a == 16 ? 0x8 : \
300 a == 32 ? 0xA : \
301 a == 48 ? 0xB : \
302 a == 64 ? 0xC : \
303 a == 96 ? 0xD : \
304 a == 128 ? 0xE : \
305 a == ASSOC_FULL ? 0xF : \
306 0 /* invalid value */)
307
308 /*
309 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
310 * @l3 can be NULL.
311 */
312 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
313 CPUCacheInfo *l3,
314 uint32_t *ecx, uint32_t *edx)
315 {
316 assert(l2->size % 1024 == 0);
317 assert(l2->associativity > 0);
318 assert(l2->lines_per_tag > 0);
319 assert(l2->line_size > 0);
320 *ecx = ((l2->size / 1024) << 16) |
321 (AMD_ENC_ASSOC(l2->associativity) << 12) |
322 (l2->lines_per_tag << 8) | (l2->line_size);
323
324 if (l3) {
325 assert(l3->size % (512 * 1024) == 0);
326 assert(l3->associativity > 0);
327 assert(l3->lines_per_tag > 0);
328 assert(l3->line_size > 0);
329 *edx = ((l3->size / (512 * 1024)) << 18) |
330 (AMD_ENC_ASSOC(l3->associativity) << 12) |
331 (l3->lines_per_tag << 8) | (l3->line_size);
332 } else {
333 *edx = 0;
334 }
335 }
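/*
 * Worked example (illustrative, not part of upstream QEMU): for the EPYC
 * cache model defined later in this file, the L2 (512 KiB, 8-way, 1 line
 * per tag, 64-byte lines) encodes as
 *   ECX = (512 << 16) | (AMD_ENC_ASSOC(8) << 12) | (1 << 8) | 64
 *       = (512 << 16) | (0x6 << 12) | 0x140 = 0x02006140
 * and the L3 (8 MiB, 16-way) as
 *   EDX = (16 << 18) | (AMD_ENC_ASSOC(16) << 12) | (1 << 8) | 64
 *       = 0x00400000 | 0x8000 | 0x140 = 0x00408140,
 * where 16 is the L3 size expressed in 512 KiB units.
 */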
336
337 /*
338 * Definitions used for building CPUID Leaf 0x8000001D and 0x8000001E.
339 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
340 * These constants describe the CPU topology. Right now, the TOPOEXT
341 * feature is enabled only on EPYC, so the constants are based on the
342 * configurations that EPYC supports. They may need to be revisited if
343 * these values change in the future.
344 */
345 /* Maximum core complexes in a node */
346 #define MAX_CCX 2
347 /* Maximum cores in a core complex */
348 #define MAX_CORES_IN_CCX 4
349 /* Maximum cores in a node */
350 #define MAX_CORES_IN_NODE 8
351 /* Maximum nodes in a socket */
352 #define MAX_NODES_PER_SOCKET 4
353
354 /*
355 * Figure out the number of nodes required to build this config.
356 * The maximum number of cores in a node is MAX_CORES_IN_NODE (8).
357 */
358 static int nodes_in_socket(int nr_cores)
359 {
360 int nodes;
361
362 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
363
364 /* Hardware does not support config with 3 nodes, return 4 in that case */
365 return (nodes == 3) ? 4 : nodes;
366 }
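/*
 * Worked example (illustrative, not part of upstream QEMU):
 *   nr_cores = 8  -> DIV_ROUND_UP(8, 8)  = 1 node
 *   nr_cores = 12 -> DIV_ROUND_UP(12, 8) = 2 nodes
 *   nr_cores = 24 -> DIV_ROUND_UP(24, 8) = 3, rounded up to 4 nodes
 */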
367
368 /*
369 * Decide the number of cores in a core complex for the given nr_cores, using
370 * the constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
371 * MAX_NODES_PER_SOCKET, and maintain symmetry as much as possible.
372 * The L3 cache is shared across all cores in a core complex, so this also
373 * tells us how many cores share the L3 cache.
374 */
375 static int cores_in_core_complex(int nr_cores)
376 {
377 int nodes;
378
379 /* Check if we can fit all the cores in one core complex */
380 if (nr_cores <= MAX_CORES_IN_CCX) {
381 return nr_cores;
382 }
383 /* Get the number of nodes required to build this config */
384 nodes = nodes_in_socket(nr_cores);
385
386 /*
387 * Divide the cores across all the core complexes.
388 * Return the rounded-up value.
389 */
390 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
391 }
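/*
 * Worked example (illustrative, not part of upstream QEMU):
 *   nr_cores = 4  -> fits in one CCX, returns 4
 *   nr_cores = 12 -> nodes_in_socket(12) = 2, so
 *                    DIV_ROUND_UP(12, 2 * MAX_CCX) = 3 cores per CCX
 *   nr_cores = 32 -> nodes_in_socket(32) = 4, so
 *                    DIV_ROUND_UP(32, 4 * MAX_CCX) = 4 cores per CCX
 */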
392
393 /* Encode cache info for CPUID[8000001D] */
394 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
395 uint32_t *eax, uint32_t *ebx,
396 uint32_t *ecx, uint32_t *edx)
397 {
398 uint32_t l3_cores;
399 assert(cache->size == cache->line_size * cache->associativity *
400 cache->partitions * cache->sets);
401
402 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
403 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
404
405 /* L3 is shared among multiple cores */
406 if (cache->level == 3) {
407 l3_cores = cores_in_core_complex(cs->nr_cores);
408 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
409 } else {
410 *eax |= ((cs->nr_threads - 1) << 14);
411 }
412
413 assert(cache->line_size > 0);
414 assert(cache->partitions > 0);
415 assert(cache->associativity > 0);
416 /* We don't implement fully-associative caches */
417 assert(cache->associativity < cache->sets);
418 *ebx = (cache->line_size - 1) |
419 ((cache->partitions - 1) << 12) |
420 ((cache->associativity - 1) << 22);
421
422 assert(cache->sets > 0);
423 *ecx = cache->sets - 1;
424
425 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
426 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
427 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
428 }
429
430 /* Data structure to hold the configuration info for a given core index */
431 struct core_topology {
432 /* core complex id of the current core index */
433 int ccx_id;
434 /*
435 * Adjusted core index for this core in the topology
436 * This can be 0,1,2,3 with max 4 cores in a core complex
437 */
438 int core_id;
439 /* Node id for this core index */
440 int node_id;
441 /* Number of nodes in this config */
442 int num_nodes;
443 };
444
445 /*
446 * Build a configuration that closely matches the EPYC hardware, using the
447 * EPYC hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX,
448 * MAX_CORES_IN_NODE) for now. This could change in the future.
449 * nr_cores : Total number of cores in the config
450 * core_id : Core index of the current CPU
451 * topo : Data structure to hold all the config info for this core index
452 */
453 static void build_core_topology(int nr_cores, int core_id,
454 struct core_topology *topo)
455 {
456 int nodes, cores_in_ccx;
457
458 /* First get the number of nodes required */
459 nodes = nodes_in_socket(nr_cores);
460
461 cores_in_ccx = cores_in_core_complex(nr_cores);
462
463 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
464 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
465 topo->core_id = core_id % cores_in_ccx;
466 topo->num_nodes = nodes;
467 }
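/*
 * Worked example (illustrative, not part of upstream QEMU): with
 * nr_cores = 16 and core_id = 10, nodes_in_socket(16) = 2 and
 * cores_in_core_complex(16) = 4, so
 *   node_id = 10 / (4 * 2) = 1
 *   ccx_id  = (10 % 8) / 4 = 0
 *   core_id = 10 % 4      = 2
 * i.e. the 11th core sits in core slot 2 of CCX 0 on node 1.
 */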
468
469 /* Encode CPU topology info for CPUID[8000001E] */
470 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
471 uint32_t *eax, uint32_t *ebx,
472 uint32_t *ecx, uint32_t *edx)
473 {
474 struct core_topology topo = {0};
475
476 build_core_topology(cs->nr_cores, cpu->core_id, &topo);
477 *eax = cpu->apic_id;
478 /*
479 * CPUID_Fn8000001E_EBX
480 * 31:16 Reserved
481 * 15:8 Threads per core (The number of threads per core is
482 * Threads per core + 1)
483 * 7:0 Core id (see bit decoding below)
484 * SMT:
485 * 4:3 node id
486 * 2 Core complex id
487 * 1:0 Core id
488 * Non SMT:
489 * 5:4 node id
490 * 3 Core complex id
491 * 1:0 Core id
492 */
493 if (cs->nr_threads - 1) {
494 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
495 (topo.ccx_id << 2) | topo.core_id;
496 } else {
497 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
498 }
499 /*
500 * CPUID_Fn8000001E_ECX
501 * 31:11 Reserved
502 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
503 * 7:0 Node id (see bit decoding below)
504 * 2 Socket id
505 * 1:0 Node id
506 */
507 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) | topo.node_id;
508 *edx = 0;
509 }
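/*
 * Worked example (illustrative, not part of upstream QEMU): continuing the
 * topology above (node_id = 1, ccx_id = 0, core_id = 2, num_nodes = 2) on a
 * guest with 2 threads per core and socket_id = 0:
 *   EBX = (1 << 8) | (1 << 3) | (0 << 2) | 2 = 0x10A
 *   ECX = ((2 - 1) << 8) | (0 << 2) | 1      = 0x101
 * With SMT disabled the same core would instead use the wider layout:
 *   EBX = (1 << 4) | (0 << 3) | 2 = 0x12
 */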
510
511 /*
512 * Definitions of the hardcoded cache entries we expose:
513 * These are legacy cache values. If there is a need to change any
514 * of these values, please use builtin_x86_defs instead.
515 */
516
517 /* L1 data cache: */
518 static CPUCacheInfo legacy_l1d_cache = {
519 .type = DCACHE,
520 .level = 1,
521 .size = 32 * KiB,
522 .self_init = 1,
523 .line_size = 64,
524 .associativity = 8,
525 .sets = 64,
526 .partitions = 1,
527 .no_invd_sharing = true,
528 };
529
530 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
531 static CPUCacheInfo legacy_l1d_cache_amd = {
532 .type = DCACHE,
533 .level = 1,
534 .size = 64 * KiB,
535 .self_init = 1,
536 .line_size = 64,
537 .associativity = 2,
538 .sets = 512,
539 .partitions = 1,
540 .lines_per_tag = 1,
541 .no_invd_sharing = true,
542 };
543
544 /* L1 instruction cache: */
545 static CPUCacheInfo legacy_l1i_cache = {
546 .type = ICACHE,
547 .level = 1,
548 .size = 32 * KiB,
549 .self_init = 1,
550 .line_size = 64,
551 .associativity = 8,
552 .sets = 64,
553 .partitions = 1,
554 .no_invd_sharing = true,
555 };
556
557 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
558 static CPUCacheInfo legacy_l1i_cache_amd = {
559 .type = ICACHE,
560 .level = 1,
561 .size = 64 * KiB,
562 .self_init = 1,
563 .line_size = 64,
564 .associativity = 2,
565 .sets = 512,
566 .partitions = 1,
567 .lines_per_tag = 1,
568 .no_invd_sharing = true,
569 };
570
571 /* Level 2 unified cache: */
572 static CPUCacheInfo legacy_l2_cache = {
573 .type = UNIFIED_CACHE,
574 .level = 2,
575 .size = 4 * MiB,
576 .self_init = 1,
577 .line_size = 64,
578 .associativity = 16,
579 .sets = 4096,
580 .partitions = 1,
581 .no_invd_sharing = true,
582 };
583
584 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
585 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
586 .type = UNIFIED_CACHE,
587 .level = 2,
588 .size = 2 * MiB,
589 .line_size = 64,
590 .associativity = 8,
591 };
592
593
594 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
595 static CPUCacheInfo legacy_l2_cache_amd = {
596 .type = UNIFIED_CACHE,
597 .level = 2,
598 .size = 512 * KiB,
599 .line_size = 64,
600 .lines_per_tag = 1,
601 .associativity = 16,
602 .sets = 512,
603 .partitions = 1,
604 };
605
606 /* Level 3 unified cache: */
607 static CPUCacheInfo legacy_l3_cache = {
608 .type = UNIFIED_CACHE,
609 .level = 3,
610 .size = 16 * MiB,
611 .line_size = 64,
612 .associativity = 16,
613 .sets = 16384,
614 .partitions = 1,
615 .lines_per_tag = 1,
616 .self_init = true,
617 .inclusive = true,
618 .complex_indexing = true,
619 };
620
621 /* TLB definitions: */
622
623 #define L1_DTLB_2M_ASSOC 1
624 #define L1_DTLB_2M_ENTRIES 255
625 #define L1_DTLB_4K_ASSOC 1
626 #define L1_DTLB_4K_ENTRIES 255
627
628 #define L1_ITLB_2M_ASSOC 1
629 #define L1_ITLB_2M_ENTRIES 255
630 #define L1_ITLB_4K_ASSOC 1
631 #define L1_ITLB_4K_ENTRIES 255
632
633 #define L2_DTLB_2M_ASSOC 0 /* disabled */
634 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
635 #define L2_DTLB_4K_ASSOC 4
636 #define L2_DTLB_4K_ENTRIES 512
637
638 #define L2_ITLB_2M_ASSOC 0 /* disabled */
639 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
640 #define L2_ITLB_4K_ASSOC 4
641 #define L2_ITLB_4K_ENTRIES 512
642
643 /* CPUID Leaf 0x14 constants: */
644 #define INTEL_PT_MAX_SUBLEAF 0x1
645 /*
646 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
647 * MSR can be accessed;
648 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
649 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
650 * of Intel PT MSRs across warm reset;
651 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
652 */
653 #define INTEL_PT_MINIMAL_EBX 0xf
654 /*
655 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
656 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
657 * accessed;
658 * bit[01]: ToPA tables can hold any number of output entries, up to the
659 * maximum allowed by the MaskOrTableOffset field of
660 * IA32_RTIT_OUTPUT_MASK_PTRS;
661 * bit[02]: Support Single-Range Output scheme;
662 */
663 #define INTEL_PT_MINIMAL_ECX 0x7
664 /* generated packets which contain IP payloads have LIP values */
665 #define INTEL_PT_IP_LIP (1 << 31)
666 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
667 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
668 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
669 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
670 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
671
672 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
673 uint32_t vendor2, uint32_t vendor3)
674 {
675 int i;
676 for (i = 0; i < 4; i++) {
677 dst[i] = vendor1 >> (8 * i);
678 dst[i + 4] = vendor2 >> (8 * i);
679 dst[i + 8] = vendor3 >> (8 * i);
680 }
681 dst[CPUID_VENDOR_SZ] = '\0';
682 }
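/*
 * Illustrative example (not part of upstream QEMU): the vendor string is
 * simply the three registers laid out byte by byte.  For an Intel CPU,
 * CPUID[0] returns EBX = 0x756e6547 ("Genu"), EDX = 0x49656e69 ("ineI"),
 * ECX = 0x6c65746e ("ntel"), which this helper reassembles into the
 * familiar "GenuineIntel".
 */
#if 0
static void example_vendor_string(void)
{
    char vendor[CPUID_VENDOR_SZ + 1];

    x86_cpu_vendor_words2str(vendor, 0x756e6547, 0x49656e69, 0x6c65746e);
    /* vendor now holds "GenuineIntel" */
}
#endif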
683
684 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
685 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
686 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
687 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
688 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
689 CPUID_PSE36 | CPUID_FXSR)
690 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
691 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
692 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
693 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
694 CPUID_PAE | CPUID_SEP | CPUID_APIC)
695
696 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
697 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
698 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
699 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
700 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
701 /* partly implemented:
702 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
703 /* missing:
704 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
705 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
706 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
707 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
708 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
709 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
710 /* missing:
711 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
712 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
713 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
714 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
715 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
716
717 #ifdef TARGET_X86_64
718 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
719 #else
720 #define TCG_EXT2_X86_64_FEATURES 0
721 #endif
722
723 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
724 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
725 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
726 TCG_EXT2_X86_64_FEATURES)
727 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
728 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
729 #define TCG_EXT4_FEATURES 0
730 #define TCG_SVM_FEATURES 0
731 #define TCG_KVM_FEATURES 0
732 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
733 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
734 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
735 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
736 CPUID_7_0_EBX_ERMS)
737 /* missing:
738 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
739 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
740 CPUID_7_0_EBX_RDSEED */
741 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
742 /* CPUID_7_0_ECX_OSPKE is dynamic */ \
743 CPUID_7_0_ECX_LA57)
744 #define TCG_7_0_EDX_FEATURES 0
745 #define TCG_APM_FEATURES 0
746 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
747 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
748 /* missing:
749 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
750
751 typedef struct FeatureWordInfo {
752 /* feature flag names are taken from "Intel Processor Identification and
753 * the CPUID Instruction" and AMD's "CPUID Specification".
754 * In cases of disagreement between feature naming conventions,
755 * aliases may be added.
756 */
757 const char *feat_names[32];
758 uint32_t cpuid_eax; /* Input EAX for CPUID */
759 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
760 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
761 int cpuid_reg; /* output register (R_* constant) */
762 uint32_t tcg_features; /* Feature flags supported by TCG */
763 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
764 uint32_t migratable_flags; /* Feature flags known to be migratable */
765 /* Features that shouldn't be auto-enabled by "-cpu host" */
766 uint32_t no_autoenable_flags;
767 } FeatureWordInfo;
768
769 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
770 [FEAT_1_EDX] = {
771 .feat_names = {
772 "fpu", "vme", "de", "pse",
773 "tsc", "msr", "pae", "mce",
774 "cx8", "apic", NULL, "sep",
775 "mtrr", "pge", "mca", "cmov",
776 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
777 NULL, "ds" /* Intel dts */, "acpi", "mmx",
778 "fxsr", "sse", "sse2", "ss",
779 "ht" /* Intel htt */, "tm", "ia64", "pbe",
780 },
781 .cpuid_eax = 1, .cpuid_reg = R_EDX,
782 .tcg_features = TCG_FEATURES,
783 },
784 [FEAT_1_ECX] = {
785 .feat_names = {
786 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
787 "ds-cpl", "vmx", "smx", "est",
788 "tm2", "ssse3", "cid", NULL,
789 "fma", "cx16", "xtpr", "pdcm",
790 NULL, "pcid", "dca", "sse4.1",
791 "sse4.2", "x2apic", "movbe", "popcnt",
792 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
793 "avx", "f16c", "rdrand", "hypervisor",
794 },
795 .cpuid_eax = 1, .cpuid_reg = R_ECX,
796 .tcg_features = TCG_EXT_FEATURES,
797 },
798 /* Feature names that are already defined in feature_name[] but
799 * are set in CPUID[8000_0001].EDX on AMD CPUs don't have their
800 * names listed in feat_names below. They are copied automatically
801 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
802 */
803 [FEAT_8000_0001_EDX] = {
804 .feat_names = {
805 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
806 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
807 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
808 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
809 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
810 "nx", NULL, "mmxext", NULL /* mmx */,
811 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
812 NULL, "lm", "3dnowext", "3dnow",
813 },
814 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
815 .tcg_features = TCG_EXT2_FEATURES,
816 },
817 [FEAT_8000_0001_ECX] = {
818 .feat_names = {
819 "lahf-lm", "cmp-legacy", "svm", "extapic",
820 "cr8legacy", "abm", "sse4a", "misalignsse",
821 "3dnowprefetch", "osvw", "ibs", "xop",
822 "skinit", "wdt", NULL, "lwp",
823 "fma4", "tce", NULL, "nodeid-msr",
824 NULL, "tbm", "topoext", "perfctr-core",
825 "perfctr-nb", NULL, NULL, NULL,
826 NULL, NULL, NULL, NULL,
827 },
828 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
829 .tcg_features = TCG_EXT3_FEATURES,
830 },
831 [FEAT_C000_0001_EDX] = {
832 .feat_names = {
833 NULL, NULL, "xstore", "xstore-en",
834 NULL, NULL, "xcrypt", "xcrypt-en",
835 "ace2", "ace2-en", "phe", "phe-en",
836 "pmm", "pmm-en", NULL, NULL,
837 NULL, NULL, NULL, NULL,
838 NULL, NULL, NULL, NULL,
839 NULL, NULL, NULL, NULL,
840 NULL, NULL, NULL, NULL,
841 },
842 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
843 .tcg_features = TCG_EXT4_FEATURES,
844 },
845 [FEAT_KVM] = {
846 .feat_names = {
847 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
848 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
849 NULL, "kvm-pv-tlb-flush", NULL, NULL,
850 NULL, NULL, NULL, NULL,
851 NULL, NULL, NULL, NULL,
852 NULL, NULL, NULL, NULL,
853 "kvmclock-stable-bit", NULL, NULL, NULL,
854 NULL, NULL, NULL, NULL,
855 },
856 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
857 .tcg_features = TCG_KVM_FEATURES,
858 },
859 [FEAT_KVM_HINTS] = {
860 .feat_names = {
861 "kvm-hint-dedicated", NULL, NULL, NULL,
862 NULL, NULL, NULL, NULL,
863 NULL, NULL, NULL, NULL,
864 NULL, NULL, NULL, NULL,
865 NULL, NULL, NULL, NULL,
866 NULL, NULL, NULL, NULL,
867 NULL, NULL, NULL, NULL,
868 NULL, NULL, NULL, NULL,
869 },
870 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
871 .tcg_features = TCG_KVM_FEATURES,
872 /*
873 * KVM hints aren't auto-enabled by -cpu host; they need to be
874 * enabled explicitly on the command line.
875 */
876 .no_autoenable_flags = ~0U,
877 },
878 [FEAT_HYPERV_EAX] = {
879 .feat_names = {
880 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
881 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
882 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
883 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
884 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
885 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
886 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
887 NULL, NULL,
888 NULL, NULL, NULL, NULL,
889 NULL, NULL, NULL, NULL,
890 NULL, NULL, NULL, NULL,
891 NULL, NULL, NULL, NULL,
892 },
893 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
894 },
895 [FEAT_HYPERV_EBX] = {
896 .feat_names = {
897 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
898 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
899 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
900 NULL /* hv_create_port */, NULL /* hv_connect_port */,
901 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
902 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
903 NULL, NULL,
904 NULL, NULL, NULL, NULL,
905 NULL, NULL, NULL, NULL,
906 NULL, NULL, NULL, NULL,
907 NULL, NULL, NULL, NULL,
908 },
909 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
910 },
911 [FEAT_HYPERV_EDX] = {
912 .feat_names = {
913 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
914 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
915 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
916 NULL, NULL,
917 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
918 NULL, NULL, NULL, NULL,
919 NULL, NULL, NULL, NULL,
920 NULL, NULL, NULL, NULL,
921 NULL, NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 },
924 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
925 },
926 [FEAT_SVM] = {
927 .feat_names = {
928 "npt", "lbrv", "svm-lock", "nrip-save",
929 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
930 NULL, NULL, "pause-filter", NULL,
931 "pfthreshold", NULL, NULL, NULL,
932 NULL, NULL, NULL, NULL,
933 NULL, NULL, NULL, NULL,
934 NULL, NULL, NULL, NULL,
935 NULL, NULL, NULL, NULL,
936 },
937 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
938 .tcg_features = TCG_SVM_FEATURES,
939 },
940 [FEAT_7_0_EBX] = {
941 .feat_names = {
942 "fsgsbase", "tsc-adjust", NULL, "bmi1",
943 "hle", "avx2", NULL, "smep",
944 "bmi2", "erms", "invpcid", "rtm",
945 NULL, NULL, "mpx", NULL,
946 "avx512f", "avx512dq", "rdseed", "adx",
947 "smap", "avx512ifma", "pcommit", "clflushopt",
948 "clwb", "intel-pt", "avx512pf", "avx512er",
949 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
950 },
951 .cpuid_eax = 7,
952 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
953 .cpuid_reg = R_EBX,
954 .tcg_features = TCG_7_0_EBX_FEATURES,
955 },
956 [FEAT_7_0_ECX] = {
957 .feat_names = {
958 NULL, "avx512vbmi", "umip", "pku",
959 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
960 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
961 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
962 "la57", NULL, NULL, NULL,
963 NULL, NULL, "rdpid", NULL,
964 NULL, "cldemote", NULL, NULL,
965 NULL, NULL, NULL, NULL,
966 },
967 .cpuid_eax = 7,
968 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
969 .cpuid_reg = R_ECX,
970 .tcg_features = TCG_7_0_ECX_FEATURES,
971 },
972 [FEAT_7_0_EDX] = {
973 .feat_names = {
974 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
975 NULL, NULL, NULL, NULL,
976 NULL, NULL, NULL, NULL,
977 NULL, NULL, NULL, NULL,
978 NULL, NULL, NULL, NULL,
979 NULL, NULL, NULL, NULL,
980 NULL, NULL, "spec-ctrl", NULL,
981 NULL, NULL, NULL, "ssbd",
982 },
983 .cpuid_eax = 7,
984 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
985 .cpuid_reg = R_EDX,
986 .tcg_features = TCG_7_0_EDX_FEATURES,
987 },
988 [FEAT_8000_0007_EDX] = {
989 .feat_names = {
990 NULL, NULL, NULL, NULL,
991 NULL, NULL, NULL, NULL,
992 "invtsc", NULL, NULL, NULL,
993 NULL, NULL, NULL, NULL,
994 NULL, NULL, NULL, NULL,
995 NULL, NULL, NULL, NULL,
996 NULL, NULL, NULL, NULL,
997 NULL, NULL, NULL, NULL,
998 },
999 .cpuid_eax = 0x80000007,
1000 .cpuid_reg = R_EDX,
1001 .tcg_features = TCG_APM_FEATURES,
1002 .unmigratable_flags = CPUID_APM_INVTSC,
1003 },
1004 [FEAT_8000_0008_EBX] = {
1005 .feat_names = {
1006 NULL, NULL, NULL, NULL,
1007 NULL, NULL, NULL, NULL,
1008 NULL, NULL, NULL, NULL,
1009 "ibpb", NULL, NULL, NULL,
1010 NULL, NULL, NULL, NULL,
1011 NULL, NULL, NULL, NULL,
1012 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1013 NULL, NULL, NULL, NULL,
1014 },
1015 .cpuid_eax = 0x80000008,
1016 .cpuid_reg = R_EBX,
1017 .tcg_features = 0,
1018 .unmigratable_flags = 0,
1019 },
1020 [FEAT_XSAVE] = {
1021 .feat_names = {
1022 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1023 NULL, NULL, NULL, NULL,
1024 NULL, NULL, NULL, NULL,
1025 NULL, NULL, NULL, NULL,
1026 NULL, NULL, NULL, NULL,
1027 NULL, NULL, NULL, NULL,
1028 NULL, NULL, NULL, NULL,
1029 NULL, NULL, NULL, NULL,
1030 },
1031 .cpuid_eax = 0xd,
1032 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
1033 .cpuid_reg = R_EAX,
1034 .tcg_features = TCG_XSAVE_FEATURES,
1035 },
1036 [FEAT_6_EAX] = {
1037 .feat_names = {
1038 NULL, NULL, "arat", NULL,
1039 NULL, NULL, NULL, NULL,
1040 NULL, NULL, NULL, NULL,
1041 NULL, NULL, NULL, NULL,
1042 NULL, NULL, NULL, NULL,
1043 NULL, NULL, NULL, NULL,
1044 NULL, NULL, NULL, NULL,
1045 NULL, NULL, NULL, NULL,
1046 },
1047 .cpuid_eax = 6, .cpuid_reg = R_EAX,
1048 .tcg_features = TCG_6_EAX_FEATURES,
1049 },
1050 [FEAT_XSAVE_COMP_LO] = {
1051 .cpuid_eax = 0xD,
1052 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1053 .cpuid_reg = R_EAX,
1054 .tcg_features = ~0U,
1055 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1056 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1057 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1058 XSTATE_PKRU_MASK,
1059 },
1060 [FEAT_XSAVE_COMP_HI] = {
1061 .cpuid_eax = 0xD,
1062 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
1063 .cpuid_reg = R_EDX,
1064 .tcg_features = ~0U,
1065 },
1066 };
1067
1068 typedef struct X86RegisterInfo32 {
1069 /* Name of register */
1070 const char *name;
1071 /* QAPI enum value register */
1072 X86CPURegister32 qapi_enum;
1073 } X86RegisterInfo32;
1074
1075 #define REGISTER(reg) \
1076 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1077 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
1078 REGISTER(EAX),
1079 REGISTER(ECX),
1080 REGISTER(EDX),
1081 REGISTER(EBX),
1082 REGISTER(ESP),
1083 REGISTER(EBP),
1084 REGISTER(ESI),
1085 REGISTER(EDI),
1086 };
1087 #undef REGISTER
1088
1089 typedef struct ExtSaveArea {
1090 uint32_t feature, bits;
1091 uint32_t offset, size;
1092 } ExtSaveArea;
1093
1094 static const ExtSaveArea x86_ext_save_areas[] = {
1095 [XSTATE_FP_BIT] = {
1096 /* x87 FP state component is always enabled if XSAVE is supported */
1097 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1098 /* x87 state is in the legacy region of the XSAVE area */
1099 .offset = 0,
1100 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1101 },
1102 [XSTATE_SSE_BIT] = {
1103 /* SSE state component is always enabled if XSAVE is supported */
1104 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1105 /* SSE state is in the legacy region of the XSAVE area */
1106 .offset = 0,
1107 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1108 },
1109 [XSTATE_YMM_BIT] =
1110 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1111 .offset = offsetof(X86XSaveArea, avx_state),
1112 .size = sizeof(XSaveAVX) },
1113 [XSTATE_BNDREGS_BIT] =
1114 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1115 .offset = offsetof(X86XSaveArea, bndreg_state),
1116 .size = sizeof(XSaveBNDREG) },
1117 [XSTATE_BNDCSR_BIT] =
1118 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1119 .offset = offsetof(X86XSaveArea, bndcsr_state),
1120 .size = sizeof(XSaveBNDCSR) },
1121 [XSTATE_OPMASK_BIT] =
1122 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1123 .offset = offsetof(X86XSaveArea, opmask_state),
1124 .size = sizeof(XSaveOpmask) },
1125 [XSTATE_ZMM_Hi256_BIT] =
1126 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1127 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1128 .size = sizeof(XSaveZMM_Hi256) },
1129 [XSTATE_Hi16_ZMM_BIT] =
1130 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1131 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1132 .size = sizeof(XSaveHi16_ZMM) },
1133 [XSTATE_PKRU_BIT] =
1134 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1135 .offset = offsetof(X86XSaveArea, pkru_state),
1136 .size = sizeof(XSavePKRU) },
1137 };
1138
1139 static uint32_t xsave_area_size(uint64_t mask)
1140 {
1141 int i;
1142 uint64_t ret = 0;
1143
1144 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1145 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1146 if ((mask >> i) & 1) {
1147 ret = MAX(ret, esa->offset + esa->size);
1148 }
1149 }
1150 return ret;
1151 }
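/*
 * Illustrative example (not part of upstream QEMU): for a guest exposing
 * only x87, SSE and AVX state, mask = XSTATE_FP_MASK | XSTATE_SSE_MASK |
 * XSTATE_YMM_MASK and the loop above reduces to
 *   MAX(legacy region + header,
 *       offsetof(X86XSaveArea, avx_state) + sizeof(XSaveAVX))
 * i.e. the XSAVE area ends right after the highest enabled component.
 */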
1152
1153 static inline bool accel_uses_host_cpuid(void)
1154 {
1155 return kvm_enabled() || hvf_enabled();
1156 }
1157
1158 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1159 {
1160 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1161 cpu->env.features[FEAT_XSAVE_COMP_LO];
1162 }
1163
1164 const char *get_register_name_32(unsigned int reg)
1165 {
1166 if (reg >= CPU_NB_REGS32) {
1167 return NULL;
1168 }
1169 return x86_reg_info_32[reg].name;
1170 }
1171
1172 /*
1173 * Returns the set of feature flags that are supported and migratable by
1174 * QEMU, for a given FeatureWord.
1175 */
1176 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1177 {
1178 FeatureWordInfo *wi = &feature_word_info[w];
1179 uint32_t r = 0;
1180 int i;
1181
1182 for (i = 0; i < 32; i++) {
1183 uint32_t f = 1U << i;
1184
1185 /* If the feature name is known, it is implicitly considered migratable,
1186 * unless it is explicitly set in unmigratable_flags */
1187 if ((wi->migratable_flags & f) ||
1188 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1189 r |= f;
1190 }
1191 }
1192 return r;
1193 }
1194
1195 void host_cpuid(uint32_t function, uint32_t count,
1196 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1197 {
1198 uint32_t vec[4];
1199
1200 #ifdef __x86_64__
1201 asm volatile("cpuid"
1202 : "=a"(vec[0]), "=b"(vec[1]),
1203 "=c"(vec[2]), "=d"(vec[3])
1204 : "0"(function), "c"(count) : "cc");
1205 #elif defined(__i386__)
1206 asm volatile("pusha \n\t"
1207 "cpuid \n\t"
1208 "mov %%eax, 0(%2) \n\t"
1209 "mov %%ebx, 4(%2) \n\t"
1210 "mov %%ecx, 8(%2) \n\t"
1211 "mov %%edx, 12(%2) \n\t"
1212 "popa"
1213 : : "a"(function), "c"(count), "S"(vec)
1214 : "memory", "cc");
1215 #else
1216 abort();
1217 #endif
1218
1219 if (eax)
1220 *eax = vec[0];
1221 if (ebx)
1222 *ebx = vec[1];
1223 if (ecx)
1224 *ecx = vec[2];
1225 if (edx)
1226 *edx = vec[3];
1227 }
1228
1229 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1230 {
1231 uint32_t eax, ebx, ecx, edx;
1232
1233 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1234 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1235
1236 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1237 if (family) {
1238 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1239 }
1240 if (model) {
1241 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1242 }
1243 if (stepping) {
1244 *stepping = eax & 0x0F;
1245 }
1246 }
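/*
 * Worked example (illustrative, not part of upstream QEMU): a Skylake
 * client part reports CPUID[1].EAX = 0x000506E3, which decodes as
 *   family   = ((eax >> 8) & 0xF) + ((eax >> 20) & 0xFF)   = 6 + 0 = 6
 *   model    = ((eax >> 4) & 0xF) | ((eax & 0xF0000) >> 12) = 0x5E  = 94
 *   stepping = eax & 0xF                                            = 3
 * i.e. the 06_5EH (family 6, model 94) signature.
 */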
1247
1248 /* CPU class name definitions: */
1249
1250 /* Return type name for a given CPU model name
1251 * Caller is responsible for freeing the returned string.
1252 */
1253 static char *x86_cpu_type_name(const char *model_name)
1254 {
1255 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
1256 }
1257
1258 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1259 {
1260 ObjectClass *oc;
1261 char *typename = x86_cpu_type_name(cpu_model);
1262 oc = object_class_by_name(typename);
1263 g_free(typename);
1264 return oc;
1265 }
1266
1267 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1268 {
1269 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1270 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1271 return g_strndup(class_name,
1272 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1273 }
1274
1275 struct X86CPUDefinition {
1276 const char *name;
1277 uint32_t level;
1278 uint32_t xlevel;
1279 /* vendor is a zero-terminated, 12-character ASCII string */
1280 char vendor[CPUID_VENDOR_SZ + 1];
1281 int family;
1282 int model;
1283 int stepping;
1284 FeatureWordArray features;
1285 const char *model_id;
1286 CPUCaches *cache_info;
1287 };
1288
1289 static CPUCaches epyc_cache_info = {
1290 .l1d_cache = &(CPUCacheInfo) {
1291 .type = DCACHE,
1292 .level = 1,
1293 .size = 32 * KiB,
1294 .line_size = 64,
1295 .associativity = 8,
1296 .partitions = 1,
1297 .sets = 64,
1298 .lines_per_tag = 1,
1299 .self_init = 1,
1300 .no_invd_sharing = true,
1301 },
1302 .l1i_cache = &(CPUCacheInfo) {
1303 .type = ICACHE,
1304 .level = 1,
1305 .size = 64 * KiB,
1306 .line_size = 64,
1307 .associativity = 4,
1308 .partitions = 1,
1309 .sets = 256,
1310 .lines_per_tag = 1,
1311 .self_init = 1,
1312 .no_invd_sharing = true,
1313 },
1314 .l2_cache = &(CPUCacheInfo) {
1315 .type = UNIFIED_CACHE,
1316 .level = 2,
1317 .size = 512 * KiB,
1318 .line_size = 64,
1319 .associativity = 8,
1320 .partitions = 1,
1321 .sets = 1024,
1322 .lines_per_tag = 1,
1323 },
1324 .l3_cache = &(CPUCacheInfo) {
1325 .type = UNIFIED_CACHE,
1326 .level = 3,
1327 .size = 8 * MiB,
1328 .line_size = 64,
1329 .associativity = 16,
1330 .partitions = 1,
1331 .sets = 8192,
1332 .lines_per_tag = 1,
1333 .self_init = true,
1334 .inclusive = true,
1335 .complex_indexing = true,
1336 },
1337 };
1338
1339 static X86CPUDefinition builtin_x86_defs[] = {
1340 {
1341 .name = "qemu64",
1342 .level = 0xd,
1343 .vendor = CPUID_VENDOR_AMD,
1344 .family = 6,
1345 .model = 6,
1346 .stepping = 3,
1347 .features[FEAT_1_EDX] =
1348 PPRO_FEATURES |
1349 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1350 CPUID_PSE36,
1351 .features[FEAT_1_ECX] =
1352 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1353 .features[FEAT_8000_0001_EDX] =
1354 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1355 .features[FEAT_8000_0001_ECX] =
1356 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1357 .xlevel = 0x8000000A,
1358 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1359 },
1360 {
1361 .name = "phenom",
1362 .level = 5,
1363 .vendor = CPUID_VENDOR_AMD,
1364 .family = 16,
1365 .model = 2,
1366 .stepping = 3,
1367 /* Missing: CPUID_HT */
1368 .features[FEAT_1_EDX] =
1369 PPRO_FEATURES |
1370 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1371 CPUID_PSE36 | CPUID_VME,
1372 .features[FEAT_1_ECX] =
1373 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1374 CPUID_EXT_POPCNT,
1375 .features[FEAT_8000_0001_EDX] =
1376 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1377 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1378 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1379 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1380 CPUID_EXT3_CR8LEG,
1381 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1382 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1383 .features[FEAT_8000_0001_ECX] =
1384 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1385 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1386 /* Missing: CPUID_SVM_LBRV */
1387 .features[FEAT_SVM] =
1388 CPUID_SVM_NPT,
1389 .xlevel = 0x8000001A,
1390 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1391 },
1392 {
1393 .name = "core2duo",
1394 .level = 10,
1395 .vendor = CPUID_VENDOR_INTEL,
1396 .family = 6,
1397 .model = 15,
1398 .stepping = 11,
1399 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1400 .features[FEAT_1_EDX] =
1401 PPRO_FEATURES |
1402 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1403 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1404 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1405 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1406 .features[FEAT_1_ECX] =
1407 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1408 CPUID_EXT_CX16,
1409 .features[FEAT_8000_0001_EDX] =
1410 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1411 .features[FEAT_8000_0001_ECX] =
1412 CPUID_EXT3_LAHF_LM,
1413 .xlevel = 0x80000008,
1414 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1415 },
1416 {
1417 .name = "kvm64",
1418 .level = 0xd,
1419 .vendor = CPUID_VENDOR_INTEL,
1420 .family = 15,
1421 .model = 6,
1422 .stepping = 1,
1423 /* Missing: CPUID_HT */
1424 .features[FEAT_1_EDX] =
1425 PPRO_FEATURES | CPUID_VME |
1426 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1427 CPUID_PSE36,
1428 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1429 .features[FEAT_1_ECX] =
1430 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1431 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1432 .features[FEAT_8000_0001_EDX] =
1433 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1434 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1435 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1436 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1437 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1438 .features[FEAT_8000_0001_ECX] =
1439 0,
1440 .xlevel = 0x80000008,
1441 .model_id = "Common KVM processor"
1442 },
1443 {
1444 .name = "qemu32",
1445 .level = 4,
1446 .vendor = CPUID_VENDOR_INTEL,
1447 .family = 6,
1448 .model = 6,
1449 .stepping = 3,
1450 .features[FEAT_1_EDX] =
1451 PPRO_FEATURES,
1452 .features[FEAT_1_ECX] =
1453 CPUID_EXT_SSE3,
1454 .xlevel = 0x80000004,
1455 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1456 },
1457 {
1458 .name = "kvm32",
1459 .level = 5,
1460 .vendor = CPUID_VENDOR_INTEL,
1461 .family = 15,
1462 .model = 6,
1463 .stepping = 1,
1464 .features[FEAT_1_EDX] =
1465 PPRO_FEATURES | CPUID_VME |
1466 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1467 .features[FEAT_1_ECX] =
1468 CPUID_EXT_SSE3,
1469 .features[FEAT_8000_0001_ECX] =
1470 0,
1471 .xlevel = 0x80000008,
1472 .model_id = "Common 32-bit KVM processor"
1473 },
1474 {
1475 .name = "coreduo",
1476 .level = 10,
1477 .vendor = CPUID_VENDOR_INTEL,
1478 .family = 6,
1479 .model = 14,
1480 .stepping = 8,
1481 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1482 .features[FEAT_1_EDX] =
1483 PPRO_FEATURES | CPUID_VME |
1484 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1485 CPUID_SS,
1486 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
1487 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1488 .features[FEAT_1_ECX] =
1489 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1490 .features[FEAT_8000_0001_EDX] =
1491 CPUID_EXT2_NX,
1492 .xlevel = 0x80000008,
1493 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1494 },
1495 {
1496 .name = "486",
1497 .level = 1,
1498 .vendor = CPUID_VENDOR_INTEL,
1499 .family = 4,
1500 .model = 8,
1501 .stepping = 0,
1502 .features[FEAT_1_EDX] =
1503 I486_FEATURES,
1504 .xlevel = 0,
1505 .model_id = "",
1506 },
1507 {
1508 .name = "pentium",
1509 .level = 1,
1510 .vendor = CPUID_VENDOR_INTEL,
1511 .family = 5,
1512 .model = 4,
1513 .stepping = 3,
1514 .features[FEAT_1_EDX] =
1515 PENTIUM_FEATURES,
1516 .xlevel = 0,
1517 .model_id = "",
1518 },
1519 {
1520 .name = "pentium2",
1521 .level = 2,
1522 .vendor = CPUID_VENDOR_INTEL,
1523 .family = 6,
1524 .model = 5,
1525 .stepping = 2,
1526 .features[FEAT_1_EDX] =
1527 PENTIUM2_FEATURES,
1528 .xlevel = 0,
1529 .model_id = "",
1530 },
1531 {
1532 .name = "pentium3",
1533 .level = 3,
1534 .vendor = CPUID_VENDOR_INTEL,
1535 .family = 6,
1536 .model = 7,
1537 .stepping = 3,
1538 .features[FEAT_1_EDX] =
1539 PENTIUM3_FEATURES,
1540 .xlevel = 0,
1541 .model_id = "",
1542 },
1543 {
1544 .name = "athlon",
1545 .level = 2,
1546 .vendor = CPUID_VENDOR_AMD,
1547 .family = 6,
1548 .model = 2,
1549 .stepping = 3,
1550 .features[FEAT_1_EDX] =
1551 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1552 CPUID_MCA,
1553 .features[FEAT_8000_0001_EDX] =
1554 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1555 .xlevel = 0x80000008,
1556 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1557 },
1558 {
1559 .name = "n270",
1560 .level = 10,
1561 .vendor = CPUID_VENDOR_INTEL,
1562 .family = 6,
1563 .model = 28,
1564 .stepping = 2,
1565 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1566 .features[FEAT_1_EDX] =
1567 PPRO_FEATURES |
1568 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1569 CPUID_ACPI | CPUID_SS,
1570 /* Some CPUs have no CPUID_SEP */
1571 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1572 * CPUID_EXT_XTPR */
1573 .features[FEAT_1_ECX] =
1574 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1575 CPUID_EXT_MOVBE,
1576 .features[FEAT_8000_0001_EDX] =
1577 CPUID_EXT2_NX,
1578 .features[FEAT_8000_0001_ECX] =
1579 CPUID_EXT3_LAHF_LM,
1580 .xlevel = 0x80000008,
1581 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1582 },
1583 {
1584 .name = "Conroe",
1585 .level = 10,
1586 .vendor = CPUID_VENDOR_INTEL,
1587 .family = 6,
1588 .model = 15,
1589 .stepping = 3,
1590 .features[FEAT_1_EDX] =
1591 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1592 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1593 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1594 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1595 CPUID_DE | CPUID_FP87,
1596 .features[FEAT_1_ECX] =
1597 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1598 .features[FEAT_8000_0001_EDX] =
1599 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1600 .features[FEAT_8000_0001_ECX] =
1601 CPUID_EXT3_LAHF_LM,
1602 .xlevel = 0x80000008,
1603 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1604 },
1605 {
1606 .name = "Penryn",
1607 .level = 10,
1608 .vendor = CPUID_VENDOR_INTEL,
1609 .family = 6,
1610 .model = 23,
1611 .stepping = 3,
1612 .features[FEAT_1_EDX] =
1613 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1614 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1615 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1616 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1617 CPUID_DE | CPUID_FP87,
1618 .features[FEAT_1_ECX] =
1619 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1620 CPUID_EXT_SSE3,
1621 .features[FEAT_8000_0001_EDX] =
1622 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1623 .features[FEAT_8000_0001_ECX] =
1624 CPUID_EXT3_LAHF_LM,
1625 .xlevel = 0x80000008,
1626 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1627 },
1628 {
1629 .name = "Nehalem",
1630 .level = 11,
1631 .vendor = CPUID_VENDOR_INTEL,
1632 .family = 6,
1633 .model = 26,
1634 .stepping = 3,
1635 .features[FEAT_1_EDX] =
1636 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1637 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1638 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1639 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1640 CPUID_DE | CPUID_FP87,
1641 .features[FEAT_1_ECX] =
1642 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1643 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1644 .features[FEAT_8000_0001_EDX] =
1645 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1646 .features[FEAT_8000_0001_ECX] =
1647 CPUID_EXT3_LAHF_LM,
1648 .xlevel = 0x80000008,
1649 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1650 },
1651 {
1652 .name = "Nehalem-IBRS",
1653 .level = 11,
1654 .vendor = CPUID_VENDOR_INTEL,
1655 .family = 6,
1656 .model = 26,
1657 .stepping = 3,
1658 .features[FEAT_1_EDX] =
1659 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1660 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1661 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1662 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1663 CPUID_DE | CPUID_FP87,
1664 .features[FEAT_1_ECX] =
1665 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1666 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1667 .features[FEAT_7_0_EDX] =
1668 CPUID_7_0_EDX_SPEC_CTRL,
1669 .features[FEAT_8000_0001_EDX] =
1670 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1671 .features[FEAT_8000_0001_ECX] =
1672 CPUID_EXT3_LAHF_LM,
1673 .xlevel = 0x80000008,
1674 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1675 },
1676 {
1677 .name = "Westmere",
1678 .level = 11,
1679 .vendor = CPUID_VENDOR_INTEL,
1680 .family = 6,
1681 .model = 44,
1682 .stepping = 1,
1683 .features[FEAT_1_EDX] =
1684 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1685 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1686 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1687 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1688 CPUID_DE | CPUID_FP87,
1689 .features[FEAT_1_ECX] =
1690 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1691 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1692 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1693 .features[FEAT_8000_0001_EDX] =
1694 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1695 .features[FEAT_8000_0001_ECX] =
1696 CPUID_EXT3_LAHF_LM,
1697 .features[FEAT_6_EAX] =
1698 CPUID_6_EAX_ARAT,
1699 .xlevel = 0x80000008,
1700 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1701 },
1702 {
1703 .name = "Westmere-IBRS",
1704 .level = 11,
1705 .vendor = CPUID_VENDOR_INTEL,
1706 .family = 6,
1707 .model = 44,
1708 .stepping = 1,
1709 .features[FEAT_1_EDX] =
1710 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1711 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1712 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1713 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1714 CPUID_DE | CPUID_FP87,
1715 .features[FEAT_1_ECX] =
1716 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1717 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1718 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1719 .features[FEAT_8000_0001_EDX] =
1720 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1721 .features[FEAT_8000_0001_ECX] =
1722 CPUID_EXT3_LAHF_LM,
1723 .features[FEAT_7_0_EDX] =
1724 CPUID_7_0_EDX_SPEC_CTRL,
1725 .features[FEAT_6_EAX] =
1726 CPUID_6_EAX_ARAT,
1727 .xlevel = 0x80000008,
1728 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1729 },
1730 {
1731 .name = "SandyBridge",
1732 .level = 0xd,
1733 .vendor = CPUID_VENDOR_INTEL,
1734 .family = 6,
1735 .model = 42,
1736 .stepping = 1,
1737 .features[FEAT_1_EDX] =
1738 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1739 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1740 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1741 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1742 CPUID_DE | CPUID_FP87,
1743 .features[FEAT_1_ECX] =
1744 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1745 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1746 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1747 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1748 CPUID_EXT_SSE3,
1749 .features[FEAT_8000_0001_EDX] =
1750 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1751 CPUID_EXT2_SYSCALL,
1752 .features[FEAT_8000_0001_ECX] =
1753 CPUID_EXT3_LAHF_LM,
1754 .features[FEAT_XSAVE] =
1755 CPUID_XSAVE_XSAVEOPT,
1756 .features[FEAT_6_EAX] =
1757 CPUID_6_EAX_ARAT,
1758 .xlevel = 0x80000008,
1759 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1760 },
1761 {
1762 .name = "SandyBridge-IBRS",
1763 .level = 0xd,
1764 .vendor = CPUID_VENDOR_INTEL,
1765 .family = 6,
1766 .model = 42,
1767 .stepping = 1,
1768 .features[FEAT_1_EDX] =
1769 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1770 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1771 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1772 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1773 CPUID_DE | CPUID_FP87,
1774 .features[FEAT_1_ECX] =
1775 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1776 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1777 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1778 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1779 CPUID_EXT_SSE3,
1780 .features[FEAT_8000_0001_EDX] =
1781 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1782 CPUID_EXT2_SYSCALL,
1783 .features[FEAT_8000_0001_ECX] =
1784 CPUID_EXT3_LAHF_LM,
1785 .features[FEAT_7_0_EDX] =
1786 CPUID_7_0_EDX_SPEC_CTRL,
1787 .features[FEAT_XSAVE] =
1788 CPUID_XSAVE_XSAVEOPT,
1789 .features[FEAT_6_EAX] =
1790 CPUID_6_EAX_ARAT,
1791 .xlevel = 0x80000008,
1792 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1793 },
1794 {
1795 .name = "IvyBridge",
1796 .level = 0xd,
1797 .vendor = CPUID_VENDOR_INTEL,
1798 .family = 6,
1799 .model = 58,
1800 .stepping = 9,
1801 .features[FEAT_1_EDX] =
1802 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1803 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1804 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1805 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1806 CPUID_DE | CPUID_FP87,
1807 .features[FEAT_1_ECX] =
1808 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1809 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1810 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1811 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1812 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1813 .features[FEAT_7_0_EBX] =
1814 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1815 CPUID_7_0_EBX_ERMS,
1816 .features[FEAT_8000_0001_EDX] =
1817 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1818 CPUID_EXT2_SYSCALL,
1819 .features[FEAT_8000_0001_ECX] =
1820 CPUID_EXT3_LAHF_LM,
1821 .features[FEAT_XSAVE] =
1822 CPUID_XSAVE_XSAVEOPT,
1823 .features[FEAT_6_EAX] =
1824 CPUID_6_EAX_ARAT,
1825 .xlevel = 0x80000008,
1826 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1827 },
1828 {
1829 .name = "IvyBridge-IBRS",
1830 .level = 0xd,
1831 .vendor = CPUID_VENDOR_INTEL,
1832 .family = 6,
1833 .model = 58,
1834 .stepping = 9,
1835 .features[FEAT_1_EDX] =
1836 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1837 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1838 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1839 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1840 CPUID_DE | CPUID_FP87,
1841 .features[FEAT_1_ECX] =
1842 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1843 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1844 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1845 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1846 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1847 .features[FEAT_7_0_EBX] =
1848 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1849 CPUID_7_0_EBX_ERMS,
1850 .features[FEAT_8000_0001_EDX] =
1851 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1852 CPUID_EXT2_SYSCALL,
1853 .features[FEAT_8000_0001_ECX] =
1854 CPUID_EXT3_LAHF_LM,
1855 .features[FEAT_7_0_EDX] =
1856 CPUID_7_0_EDX_SPEC_CTRL,
1857 .features[FEAT_XSAVE] =
1858 CPUID_XSAVE_XSAVEOPT,
1859 .features[FEAT_6_EAX] =
1860 CPUID_6_EAX_ARAT,
1861 .xlevel = 0x80000008,
1862 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1863 },
1864 {
1865 .name = "Haswell-noTSX",
1866 .level = 0xd,
1867 .vendor = CPUID_VENDOR_INTEL,
1868 .family = 6,
1869 .model = 60,
1870 .stepping = 1,
1871 .features[FEAT_1_EDX] =
1872 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1873 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1874 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1875 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1876 CPUID_DE | CPUID_FP87,
1877 .features[FEAT_1_ECX] =
1878 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1879 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1880 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1881 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1882 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1883 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1884 .features[FEAT_8000_0001_EDX] =
1885 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1886 CPUID_EXT2_SYSCALL,
1887 .features[FEAT_8000_0001_ECX] =
1888 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1889 .features[FEAT_7_0_EBX] =
1890 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1891 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1892 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1893 .features[FEAT_XSAVE] =
1894 CPUID_XSAVE_XSAVEOPT,
1895 .features[FEAT_6_EAX] =
1896 CPUID_6_EAX_ARAT,
1897 .xlevel = 0x80000008,
1898 .model_id = "Intel Core Processor (Haswell, no TSX)",
1899 },
1900 {
1901 .name = "Haswell-noTSX-IBRS",
1902 .level = 0xd,
1903 .vendor = CPUID_VENDOR_INTEL,
1904 .family = 6,
1905 .model = 60,
1906 .stepping = 1,
1907 .features[FEAT_1_EDX] =
1908 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1909 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1910 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1911 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1912 CPUID_DE | CPUID_FP87,
1913 .features[FEAT_1_ECX] =
1914 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1915 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1916 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1917 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1918 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1919 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1920 .features[FEAT_8000_0001_EDX] =
1921 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1922 CPUID_EXT2_SYSCALL,
1923 .features[FEAT_8000_0001_ECX] =
1924 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1925 .features[FEAT_7_0_EDX] =
1926 CPUID_7_0_EDX_SPEC_CTRL,
1927 .features[FEAT_7_0_EBX] =
1928 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1929 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1930 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1931 .features[FEAT_XSAVE] =
1932 CPUID_XSAVE_XSAVEOPT,
1933 .features[FEAT_6_EAX] =
1934 CPUID_6_EAX_ARAT,
1935 .xlevel = 0x80000008,
1936 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1937 },
1938 {
1939 .name = "Haswell",
1940 .level = 0xd,
1941 .vendor = CPUID_VENDOR_INTEL,
1942 .family = 6,
1943 .model = 60,
1944 .stepping = 4,
1945 .features[FEAT_1_EDX] =
1946 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1947 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1948 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1949 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1950 CPUID_DE | CPUID_FP87,
1951 .features[FEAT_1_ECX] =
1952 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1953 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1954 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1955 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1956 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1957 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1958 .features[FEAT_8000_0001_EDX] =
1959 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1960 CPUID_EXT2_SYSCALL,
1961 .features[FEAT_8000_0001_ECX] =
1962 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1963 .features[FEAT_7_0_EBX] =
1964 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1965 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1966 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1967 CPUID_7_0_EBX_RTM,
1968 .features[FEAT_XSAVE] =
1969 CPUID_XSAVE_XSAVEOPT,
1970 .features[FEAT_6_EAX] =
1971 CPUID_6_EAX_ARAT,
1972 .xlevel = 0x80000008,
1973 .model_id = "Intel Core Processor (Haswell)",
1974 },
1975 {
1976 .name = "Haswell-IBRS",
1977 .level = 0xd,
1978 .vendor = CPUID_VENDOR_INTEL,
1979 .family = 6,
1980 .model = 60,
1981 .stepping = 4,
1982 .features[FEAT_1_EDX] =
1983 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1984 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1985 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1986 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1987 CPUID_DE | CPUID_FP87,
1988 .features[FEAT_1_ECX] =
1989 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1990 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1991 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1992 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1993 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1994 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1995 .features[FEAT_8000_0001_EDX] =
1996 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1997 CPUID_EXT2_SYSCALL,
1998 .features[FEAT_8000_0001_ECX] =
1999 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2000 .features[FEAT_7_0_EDX] =
2001 CPUID_7_0_EDX_SPEC_CTRL,
2002 .features[FEAT_7_0_EBX] =
2003 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2004 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2005 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2006 CPUID_7_0_EBX_RTM,
2007 .features[FEAT_XSAVE] =
2008 CPUID_XSAVE_XSAVEOPT,
2009 .features[FEAT_6_EAX] =
2010 CPUID_6_EAX_ARAT,
2011 .xlevel = 0x80000008,
2012 .model_id = "Intel Core Processor (Haswell, IBRS)",
2013 },
2014 {
2015 .name = "Broadwell-noTSX",
2016 .level = 0xd,
2017 .vendor = CPUID_VENDOR_INTEL,
2018 .family = 6,
2019 .model = 61,
2020 .stepping = 2,
2021 .features[FEAT_1_EDX] =
2022 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2023 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2024 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2025 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2026 CPUID_DE | CPUID_FP87,
2027 .features[FEAT_1_ECX] =
2028 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2029 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2030 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2031 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2032 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2033 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2034 .features[FEAT_8000_0001_EDX] =
2035 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2036 CPUID_EXT2_SYSCALL,
2037 .features[FEAT_8000_0001_ECX] =
2038 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2039 .features[FEAT_7_0_EBX] =
2040 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2041 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2042 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2043 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2044 CPUID_7_0_EBX_SMAP,
2045 .features[FEAT_XSAVE] =
2046 CPUID_XSAVE_XSAVEOPT,
2047 .features[FEAT_6_EAX] =
2048 CPUID_6_EAX_ARAT,
2049 .xlevel = 0x80000008,
2050 .model_id = "Intel Core Processor (Broadwell, no TSX)",
2051 },
2052 {
2053 .name = "Broadwell-noTSX-IBRS",
2054 .level = 0xd,
2055 .vendor = CPUID_VENDOR_INTEL,
2056 .family = 6,
2057 .model = 61,
2058 .stepping = 2,
2059 .features[FEAT_1_EDX] =
2060 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2061 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2062 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2063 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2064 CPUID_DE | CPUID_FP87,
2065 .features[FEAT_1_ECX] =
2066 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2067 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2068 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2069 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2070 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2071 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2072 .features[FEAT_8000_0001_EDX] =
2073 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2074 CPUID_EXT2_SYSCALL,
2075 .features[FEAT_8000_0001_ECX] =
2076 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2077 .features[FEAT_7_0_EDX] =
2078 CPUID_7_0_EDX_SPEC_CTRL,
2079 .features[FEAT_7_0_EBX] =
2080 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2081 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2082 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2083 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2084 CPUID_7_0_EBX_SMAP,
2085 .features[FEAT_XSAVE] =
2086 CPUID_XSAVE_XSAVEOPT,
2087 .features[FEAT_6_EAX] =
2088 CPUID_6_EAX_ARAT,
2089 .xlevel = 0x80000008,
2090 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
2091 },
2092 {
2093 .name = "Broadwell",
2094 .level = 0xd,
2095 .vendor = CPUID_VENDOR_INTEL,
2096 .family = 6,
2097 .model = 61,
2098 .stepping = 2,
2099 .features[FEAT_1_EDX] =
2100 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2101 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2102 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2103 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2104 CPUID_DE | CPUID_FP87,
2105 .features[FEAT_1_ECX] =
2106 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2107 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2108 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2109 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2110 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2111 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2112 .features[FEAT_8000_0001_EDX] =
2113 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2114 CPUID_EXT2_SYSCALL,
2115 .features[FEAT_8000_0001_ECX] =
2116 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2117 .features[FEAT_7_0_EBX] =
2118 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2119 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2120 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2121 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2122 CPUID_7_0_EBX_SMAP,
2123 .features[FEAT_XSAVE] =
2124 CPUID_XSAVE_XSAVEOPT,
2125 .features[FEAT_6_EAX] =
2126 CPUID_6_EAX_ARAT,
2127 .xlevel = 0x80000008,
2128 .model_id = "Intel Core Processor (Broadwell)",
2129 },
2130 {
2131 .name = "Broadwell-IBRS",
2132 .level = 0xd,
2133 .vendor = CPUID_VENDOR_INTEL,
2134 .family = 6,
2135 .model = 61,
2136 .stepping = 2,
2137 .features[FEAT_1_EDX] =
2138 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2139 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2140 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2141 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2142 CPUID_DE | CPUID_FP87,
2143 .features[FEAT_1_ECX] =
2144 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2145 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2146 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2147 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2148 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2149 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2150 .features[FEAT_8000_0001_EDX] =
2151 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2152 CPUID_EXT2_SYSCALL,
2153 .features[FEAT_8000_0001_ECX] =
2154 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2155 .features[FEAT_7_0_EDX] =
2156 CPUID_7_0_EDX_SPEC_CTRL,
2157 .features[FEAT_7_0_EBX] =
2158 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2159 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2160 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2161 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2162 CPUID_7_0_EBX_SMAP,
2163 .features[FEAT_XSAVE] =
2164 CPUID_XSAVE_XSAVEOPT,
2165 .features[FEAT_6_EAX] =
2166 CPUID_6_EAX_ARAT,
2167 .xlevel = 0x80000008,
2168 .model_id = "Intel Core Processor (Broadwell, IBRS)",
2169 },
2170 {
2171 .name = "Skylake-Client",
2172 .level = 0xd,
2173 .vendor = CPUID_VENDOR_INTEL,
2174 .family = 6,
2175 .model = 94,
2176 .stepping = 3,
2177 .features[FEAT_1_EDX] =
2178 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2179 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2180 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2181 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2182 CPUID_DE | CPUID_FP87,
2183 .features[FEAT_1_ECX] =
2184 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2185 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2186 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2187 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2188 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2189 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2190 .features[FEAT_8000_0001_EDX] =
2191 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2192 CPUID_EXT2_SYSCALL,
2193 .features[FEAT_8000_0001_ECX] =
2194 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2195 .features[FEAT_7_0_EBX] =
2196 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2197 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2198 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2199 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2200 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2201 /* Missing: XSAVES (not supported by some Linux versions,
2202 * including v4.1 to v4.12).
2203 * KVM doesn't yet expose any XSAVES state save component,
2204 * and the only one defined in Skylake (processor tracing)
2205 * probably will block migration anyway.
2206 */
2207 .features[FEAT_XSAVE] =
2208 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2209 CPUID_XSAVE_XGETBV1,
2210 .features[FEAT_6_EAX] =
2211 CPUID_6_EAX_ARAT,
2212 .xlevel = 0x80000008,
2213 .model_id = "Intel Core Processor (Skylake)",
2214 },
2215 {
2216 .name = "Skylake-Client-IBRS",
2217 .level = 0xd,
2218 .vendor = CPUID_VENDOR_INTEL,
2219 .family = 6,
2220 .model = 94,
2221 .stepping = 3,
2222 .features[FEAT_1_EDX] =
2223 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2224 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2225 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2226 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2227 CPUID_DE | CPUID_FP87,
2228 .features[FEAT_1_ECX] =
2229 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2230 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2231 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2232 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2233 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2234 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2235 .features[FEAT_8000_0001_EDX] =
2236 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2237 CPUID_EXT2_SYSCALL,
2238 .features[FEAT_8000_0001_ECX] =
2239 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2240 .features[FEAT_7_0_EDX] =
2241 CPUID_7_0_EDX_SPEC_CTRL,
2242 .features[FEAT_7_0_EBX] =
2243 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2244 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2245 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2246 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2247 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2248 /* Missing: XSAVES (not supported by some Linux versions,
2249 * including v4.1 to v4.12).
2250 * KVM doesn't yet expose any XSAVES state save component,
2251 * and the only one defined in Skylake (processor tracing)
2252 * probably will block migration anyway.
2253 */
2254 .features[FEAT_XSAVE] =
2255 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2256 CPUID_XSAVE_XGETBV1,
2257 .features[FEAT_6_EAX] =
2258 CPUID_6_EAX_ARAT,
2259 .xlevel = 0x80000008,
2260 .model_id = "Intel Core Processor (Skylake, IBRS)",
2261 },
2262 {
2263 .name = "Skylake-Server",
2264 .level = 0xd,
2265 .vendor = CPUID_VENDOR_INTEL,
2266 .family = 6,
2267 .model = 85,
2268 .stepping = 4,
2269 .features[FEAT_1_EDX] =
2270 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2271 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2272 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2273 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2274 CPUID_DE | CPUID_FP87,
2275 .features[FEAT_1_ECX] =
2276 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2277 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2278 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2279 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2280 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2281 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2282 .features[FEAT_8000_0001_EDX] =
2283 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2284 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2285 .features[FEAT_8000_0001_ECX] =
2286 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2287 .features[FEAT_7_0_EBX] =
2288 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2289 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2290 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2291 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2292 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2293 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2294 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2295 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2296 /* Missing: XSAVES (not supported by some Linux versions,
2297 * including v4.1 to v4.12).
2298 * KVM doesn't yet expose any XSAVES state save component,
2299 * and the only one defined in Skylake (processor tracing)
2300 * probably will block migration anyway.
2301 */
2302 .features[FEAT_XSAVE] =
2303 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2304 CPUID_XSAVE_XGETBV1,
2305 .features[FEAT_6_EAX] =
2306 CPUID_6_EAX_ARAT,
2307 .xlevel = 0x80000008,
2308 .model_id = "Intel Xeon Processor (Skylake)",
2309 },
2310 {
2311 .name = "Skylake-Server-IBRS",
2312 .level = 0xd,
2313 .vendor = CPUID_VENDOR_INTEL,
2314 .family = 6,
2315 .model = 85,
2316 .stepping = 4,
2317 .features[FEAT_1_EDX] =
2318 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2319 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2320 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2321 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2322 CPUID_DE | CPUID_FP87,
2323 .features[FEAT_1_ECX] =
2324 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2325 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2326 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2327 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2328 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2329 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2330 .features[FEAT_8000_0001_EDX] =
2331 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2332 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2333 .features[FEAT_8000_0001_ECX] =
2334 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2335 .features[FEAT_7_0_EDX] =
2336 CPUID_7_0_EDX_SPEC_CTRL,
2337 .features[FEAT_7_0_EBX] =
2338 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2339 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2340 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2341 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2342 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2343 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2344 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2345 CPUID_7_0_EBX_AVX512VL,
2346 /* Missing: XSAVES (not supported by some Linux versions,
2347 * including v4.1 to v4.12).
2348 * KVM doesn't yet expose any XSAVES state save component,
2349 * and the only one defined in Skylake (processor tracing)
2350 * probably will block migration anyway.
2351 */
2352 .features[FEAT_XSAVE] =
2353 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2354 CPUID_XSAVE_XGETBV1,
2355 .features[FEAT_6_EAX] =
2356 CPUID_6_EAX_ARAT,
2357 .xlevel = 0x80000008,
2358 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2359 },
2360 {
2361 .name = "KnightsMill",
2362 .level = 0xd,
2363 .vendor = CPUID_VENDOR_INTEL,
2364 .family = 6,
2365 .model = 133,
2366 .stepping = 0,
2367 .features[FEAT_1_EDX] =
2368 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2369 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2370 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2371 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2372 CPUID_PSE | CPUID_DE | CPUID_FP87,
2373 .features[FEAT_1_ECX] =
2374 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2375 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2376 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2377 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2378 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2379 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2380 .features[FEAT_8000_0001_EDX] =
2381 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2382 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2383 .features[FEAT_8000_0001_ECX] =
2384 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2385 .features[FEAT_7_0_EBX] =
2386 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2387 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2388 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2389 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2390 CPUID_7_0_EBX_AVX512ER,
2391 .features[FEAT_7_0_ECX] =
2392 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2393 .features[FEAT_7_0_EDX] =
2394 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2395 .features[FEAT_XSAVE] =
2396 CPUID_XSAVE_XSAVEOPT,
2397 .features[FEAT_6_EAX] =
2398 CPUID_6_EAX_ARAT,
2399 .xlevel = 0x80000008,
2400 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2401 },
2402 {
2403 .name = "Opteron_G1",
2404 .level = 5,
2405 .vendor = CPUID_VENDOR_AMD,
2406 .family = 15,
2407 .model = 6,
2408 .stepping = 1,
2409 .features[FEAT_1_EDX] =
2410 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2411 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2412 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2413 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2414 CPUID_DE | CPUID_FP87,
2415 .features[FEAT_1_ECX] =
2416 CPUID_EXT_SSE3,
2417 .features[FEAT_8000_0001_EDX] =
2418 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2419 .xlevel = 0x80000008,
2420 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2421 },
2422 {
2423 .name = "Opteron_G2",
2424 .level = 5,
2425 .vendor = CPUID_VENDOR_AMD,
2426 .family = 15,
2427 .model = 6,
2428 .stepping = 1,
2429 .features[FEAT_1_EDX] =
2430 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2431 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2432 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2433 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2434 CPUID_DE | CPUID_FP87,
2435 .features[FEAT_1_ECX] =
2436 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2437 /* Missing: CPUID_EXT2_RDTSCP */
2438 .features[FEAT_8000_0001_EDX] =
2439 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2440 .features[FEAT_8000_0001_ECX] =
2441 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2442 .xlevel = 0x80000008,
2443 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2444 },
2445 {
2446 .name = "Opteron_G3",
2447 .level = 5,
2448 .vendor = CPUID_VENDOR_AMD,
2449 .family = 16,
2450 .model = 2,
2451 .stepping = 3,
2452 .features[FEAT_1_EDX] =
2453 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2454 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2455 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2456 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2457 CPUID_DE | CPUID_FP87,
2458 .features[FEAT_1_ECX] =
2459 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2460 CPUID_EXT_SSE3,
2461 /* Missing: CPUID_EXT2_RDTSCP */
2462 .features[FEAT_8000_0001_EDX] =
2463 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2464 .features[FEAT_8000_0001_ECX] =
2465 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2466 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2467 .xlevel = 0x80000008,
2468 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2469 },
2470 {
2471 .name = "Opteron_G4",
2472 .level = 0xd,
2473 .vendor = CPUID_VENDOR_AMD,
2474 .family = 21,
2475 .model = 1,
2476 .stepping = 2,
2477 .features[FEAT_1_EDX] =
2478 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2479 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2480 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2481 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2482 CPUID_DE | CPUID_FP87,
2483 .features[FEAT_1_ECX] =
2484 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2485 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2486 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2487 CPUID_EXT_SSE3,
2488 /* Missing: CPUID_EXT2_RDTSCP */
2489 .features[FEAT_8000_0001_EDX] =
2490 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2491 CPUID_EXT2_SYSCALL,
2492 .features[FEAT_8000_0001_ECX] =
2493 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2494 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2495 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2496 CPUID_EXT3_LAHF_LM,
2497 /* no xsaveopt! */
2498 .xlevel = 0x8000001A,
2499 .model_id = "AMD Opteron 62xx class CPU",
2500 },
2501 {
2502 .name = "Opteron_G5",
2503 .level = 0xd,
2504 .vendor = CPUID_VENDOR_AMD,
2505 .family = 21,
2506 .model = 2,
2507 .stepping = 0,
2508 .features[FEAT_1_EDX] =
2509 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2510 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2511 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2512 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2513 CPUID_DE | CPUID_FP87,
2514 .features[FEAT_1_ECX] =
2515 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2516 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2517 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2518 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2519 /* Missing: CPUID_EXT2_RDTSCP */
2520 .features[FEAT_8000_0001_EDX] =
2521 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2522 CPUID_EXT2_SYSCALL,
2523 .features[FEAT_8000_0001_ECX] =
2524 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2525 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2526 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2527 CPUID_EXT3_LAHF_LM,
2528 /* no xsaveopt! */
2529 .xlevel = 0x8000001A,
2530 .model_id = "AMD Opteron 63xx class CPU",
2531 },
2532 {
2533 .name = "EPYC",
2534 .level = 0xd,
2535 .vendor = CPUID_VENDOR_AMD,
2536 .family = 23,
2537 .model = 1,
2538 .stepping = 2,
2539 .features[FEAT_1_EDX] =
2540 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2541 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2542 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2543 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2544 CPUID_VME | CPUID_FP87,
2545 .features[FEAT_1_ECX] =
2546 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2547 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2548 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2549 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2550 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2551 .features[FEAT_8000_0001_EDX] =
2552 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2553 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2554 CPUID_EXT2_SYSCALL,
2555 .features[FEAT_8000_0001_ECX] =
2556 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2557 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2558 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2559 .features[FEAT_7_0_EBX] =
2560 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2561 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2562 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2563 CPUID_7_0_EBX_SHA_NI,
2564 /* Missing: XSAVES (not supported by some Linux versions,
2565 * including v4.1 to v4.12).
2566 * KVM doesn't yet expose any XSAVES state save component.
2567 */
2568 .features[FEAT_XSAVE] =
2569 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2570 CPUID_XSAVE_XGETBV1,
2571 .features[FEAT_6_EAX] =
2572 CPUID_6_EAX_ARAT,
2573 .xlevel = 0x8000000A,
2574 .model_id = "AMD EPYC Processor",
2575 .cache_info = &epyc_cache_info,
2576 },
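/* EPYC-IBPB is identical to EPYC except that it also exposes
 * CPUID Fn8000_0008_EBX[IBPB], the indirect branch prediction barrier
 * used to mitigate Spectre variant 2 on AMD hosts with updated microcode.
 */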
2577 {
2578 .name = "EPYC-IBPB",
2579 .level = 0xd,
2580 .vendor = CPUID_VENDOR_AMD,
2581 .family = 23,
2582 .model = 1,
2583 .stepping = 2,
2584 .features[FEAT_1_EDX] =
2585 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2586 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2587 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2588 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2589 CPUID_VME | CPUID_FP87,
2590 .features[FEAT_1_ECX] =
2591 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2592 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2593 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2594 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2595 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2596 .features[FEAT_8000_0001_EDX] =
2597 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2598 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2599 CPUID_EXT2_SYSCALL,
2600 .features[FEAT_8000_0001_ECX] =
2601 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2602 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2603 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2604 .features[FEAT_8000_0008_EBX] =
2605 CPUID_8000_0008_EBX_IBPB,
2606 .features[FEAT_7_0_EBX] =
2607 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2608 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2609 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2610 CPUID_7_0_EBX_SHA_NI,
2611 /* Missing: XSAVES (not supported by some Linux versions,
2612 * including v4.1 to v4.12).
2613 * KVM doesn't yet expose any XSAVES state save component.
2614 */
2615 .features[FEAT_XSAVE] =
2616 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2617 CPUID_XSAVE_XGETBV1,
2618 .features[FEAT_6_EAX] =
2619 CPUID_6_EAX_ARAT,
2620 .xlevel = 0x8000000A,
2621 .model_id = "AMD EPYC Processor (with IBPB)",
2622 .cache_info = &epyc_cache_info,
2623 },
2624 };
2625
2626 typedef struct PropValue {
2627 const char *prop, *value;
2628 } PropValue;
2629
2630 /* KVM-specific features that are automatically added/removed
2631 * from all CPU models when KVM is enabled.
2632 */
2633 static PropValue kvm_default_props[] = {
2634 { "kvmclock", "on" },
2635 { "kvm-nopiodelay", "on" },
2636 { "kvm-asyncpf", "on" },
2637 { "kvm-steal-time", "on" },
2638 { "kvm-pv-eoi", "on" },
2639 { "kvmclock-stable-bit", "on" },
2640 { "x2apic", "on" },
2641 { "acpi", "off" },
2642 { "monitor", "off" },
2643 { "svm", "off" },
2644 { NULL, NULL },
2645 };
2646
2647 /* TCG-specific defaults that override all CPU models when using TCG
2648 */
2649 static PropValue tcg_default_props[] = {
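/* VME is not implemented by TCG, so turn it off by default there. */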
2650 { "vme", "off" },
2651 { NULL, NULL },
2652 };
2653
2654
2655 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2656 {
2657 PropValue *pv;
2658 for (pv = kvm_default_props; pv->prop; pv++) {
2659 if (!strcmp(pv->prop, prop)) {
2660 pv->value = value;
2661 break;
2662 }
2663 }
2664
2665 /* It is valid to call this function only for properties that
2666 * are already present in the kvm_default_props table.
2667 */
2668 assert(pv->prop);
2669 }
2670
2671 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2672 bool migratable_only);
2673
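/* Local MCE (LMCE) can be offered to the guest only when the host kernel
 * reports MCG_LMCE_P among its supported MCG_CAP bits.
 */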
2674 static bool lmce_supported(void)
2675 {
2676 uint64_t mce_cap = 0;
2677
2678 #ifdef CONFIG_KVM
2679 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2680 return false;
2681 }
2682 #endif
2683
2684 return !!(mce_cap & MCG_LMCE_P);
2685 }
2686
2687 #define CPUID_MODEL_ID_SZ 48
2688
2689 /**
2690 * cpu_x86_fill_model_id:
2691 * Get CPUID model ID string from host CPU.
2692 *
2693 * @str should have at least CPUID_MODEL_ID_SZ bytes
2694 *
2695 * The function does NOT add a null terminator to the string
2696 * automatically.
2697 */
2698 static int cpu_x86_fill_model_id(char *str)
2699 {
2700 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2701 int i;
2702
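/* The 48-byte brand string is returned by CPUID leaves 0x80000002..0x80000004,
 * 16 bytes per leaf in EAX/EBX/ECX/EDX.
 */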
2703 for (i = 0; i < 3; i++) {
2704 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2705 memcpy(str + i * 16 + 0, &eax, 4);
2706 memcpy(str + i * 16 + 4, &ebx, 4);
2707 memcpy(str + i * 16 + 8, &ecx, 4);
2708 memcpy(str + i * 16 + 12, &edx, 4);
2709 }
2710 return 0;
2711 }
2712
2713 static Property max_x86_cpu_properties[] = {
2714 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2715 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2716 DEFINE_PROP_END_OF_LIST()
2717 };
2718
2719 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2720 {
2721 DeviceClass *dc = DEVICE_CLASS(oc);
2722 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2723
2724 xcc->ordering = 9;
2725
2726 xcc->model_description =
2727 "Enables all features supported by the accelerator in the current host";
2728
2729 dc->props = max_x86_cpu_properties;
2730 }
2731
2732 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2733
2734 static void max_x86_cpu_initfn(Object *obj)
2735 {
2736 X86CPU *cpu = X86_CPU(obj);
2737 CPUX86State *env = &cpu->env;
2738 KVMState *s = kvm_state;
2739
2740 /* We can't fill the features array here because we don't know yet if
2741 * "migratable" is true or false.
2742 */
2743 cpu->max_features = true;
2744
2745 if (accel_uses_host_cpuid()) {
2746 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2747 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2748 int family, model, stepping;
2749 X86CPUDefinition host_cpudef = { };
2750 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2751
2752 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2753 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2754
2755 host_vendor_fms(vendor, &family, &model, &stepping);
2756
2757 cpu_x86_fill_model_id(model_id);
2758
2759 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2760 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2761 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2762 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2763 &error_abort);
2764 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2765 &error_abort);
2766
2767 if (kvm_enabled()) {
2768 env->cpuid_min_level =
2769 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2770 env->cpuid_min_xlevel =
2771 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2772 env->cpuid_min_xlevel2 =
2773 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2774 } else {
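/* Not KVM, so accel_uses_host_cpuid() implies HVF here. */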
2775 env->cpuid_min_level =
2776 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2777 env->cpuid_min_xlevel =
2778 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2779 env->cpuid_min_xlevel2 =
2780 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2781 }
2782
2783 if (lmce_supported()) {
2784 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2785 }
2786 } else {
2787 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2788 "vendor", &error_abort);
2789 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2790 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2791 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2792 object_property_set_str(OBJECT(cpu),
2793 "QEMU TCG CPU version " QEMU_HW_VERSION,
2794 "model-id", &error_abort);
2795 }
2796
2797 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
2798 }
2799
2800 static const TypeInfo max_x86_cpu_type_info = {
2801 .name = X86_CPU_TYPE_NAME("max"),
2802 .parent = TYPE_X86_CPU,
2803 .instance_init = max_x86_cpu_initfn,
2804 .class_init = max_x86_cpu_class_init,
2805 };
2806
2807 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2808 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2809 {
2810 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2811
2812 xcc->host_cpuid_required = true;
2813 xcc->ordering = 8;
2814
2815 if (kvm_enabled()) {
2816 xcc->model_description =
2817 "KVM processor with all supported host features ";
2818 } else if (hvf_enabled()) {
2819 xcc->model_description =
2820 "HVF processor with all supported host features ";
2821 }
2822 }
2823
2824 static const TypeInfo host_x86_cpu_type_info = {
2825 .name = X86_CPU_TYPE_NAME("host"),
2826 .parent = X86_CPU_TYPE_NAME("max"),
2827 .class_init = host_x86_cpu_class_init,
2828 };
2829
2830 #endif
2831
2832 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2833 {
2834 FeatureWordInfo *f = &feature_word_info[w];
2835 int i;
2836
2837 for (i = 0; i < 32; ++i) {
2838 if ((1UL << i) & mask) {
2839 const char *reg = get_register_name_32(f->cpuid_reg);
2840 assert(reg);
2841 warn_report("%s doesn't support requested feature: "
2842 "CPUID.%02XH:%s%s%s [bit %d]",
2843 accel_uses_host_cpuid() ? "host" : "TCG",
2844 f->cpuid_eax, reg,
2845 f->feat_names[i] ? "." : "",
2846 f->feat_names[i] ? f->feat_names[i] : "", i);
2847 }
2848 }
2849 }
2850
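/* CPUID[1].EAX encodes the family in bits 11:8; families above 0x0f spill
 * into the extended family field (bits 27:20), and the displayed family is
 * the sum of the two fields.
 */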
2851 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2852 const char *name, void *opaque,
2853 Error **errp)
2854 {
2855 X86CPU *cpu = X86_CPU(obj);
2856 CPUX86State *env = &cpu->env;
2857 int64_t value;
2858
2859 value = (env->cpuid_version >> 8) & 0xf;
2860 if (value == 0xf) {
2861 value += (env->cpuid_version >> 20) & 0xff;
2862 }
2863 visit_type_int(v, name, &value, errp);
2864 }
2865
2866 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2867 const char *name, void *opaque,
2868 Error **errp)
2869 {
2870 X86CPU *cpu = X86_CPU(obj);
2871 CPUX86State *env = &cpu->env;
2872 const int64_t min = 0;
2873 const int64_t max = 0xff + 0xf;
2874 Error *local_err = NULL;
2875 int64_t value;
2876
2877 visit_type_int(v, name, &value, &local_err);
2878 if (local_err) {
2879 error_propagate(errp, local_err);
2880 return;
2881 }
2882 if (value < min || value > max) {
2883 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2884 name ? name : "null", value, min, max);
2885 return;
2886 }
2887
2888 env->cpuid_version &= ~0xff00f00;
2889 if (value > 0x0f) {
2890 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2891 } else {
2892 env->cpuid_version |= value << 8;
2893 }
2894 }
2895
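/* The model number is split between bits 7:4 (low nibble) and the extended
 * model field in bits 19:16 (high nibble) of CPUID[1].EAX.
 */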
2896 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2897 const char *name, void *opaque,
2898 Error **errp)
2899 {
2900 X86CPU *cpu = X86_CPU(obj);
2901 CPUX86State *env = &cpu->env;
2902 int64_t value;
2903
2904 value = (env->cpuid_version >> 4) & 0xf;
2905 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2906 visit_type_int(v, name, &value, errp);
2907 }
2908
2909 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2910 const char *name, void *opaque,
2911 Error **errp)
2912 {
2913 X86CPU *cpu = X86_CPU(obj);
2914 CPUX86State *env = &cpu->env;
2915 const int64_t min = 0;
2916 const int64_t max = 0xff;
2917 Error *local_err = NULL;
2918 int64_t value;
2919
2920 visit_type_int(v, name, &value, &local_err);
2921 if (local_err) {
2922 error_propagate(errp, local_err);
2923 return;
2924 }
2925 if (value < min || value > max) {
2926 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2927 name ? name : "null", value, min, max);
2928 return;
2929 }
2930
2931 env->cpuid_version &= ~0xf00f0;
2932 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
2933 }
2934
2935 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2936 const char *name, void *opaque,
2937 Error **errp)
2938 {
2939 X86CPU *cpu = X86_CPU(obj);
2940 CPUX86State *env = &cpu->env;
2941 int64_t value;
2942
2943 value = env->cpuid_version & 0xf;
2944 visit_type_int(v, name, &value, errp);
2945 }
2946
2947 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2948 const char *name, void *opaque,
2949 Error **errp)
2950 {
2951 X86CPU *cpu = X86_CPU(obj);
2952 CPUX86State *env = &cpu->env;
2953 const int64_t min = 0;
2954 const int64_t max = 0xf;
2955 Error *local_err = NULL;
2956 int64_t value;
2957
2958 visit_type_int(v, name, &value, &local_err);
2959 if (local_err) {
2960 error_propagate(errp, local_err);
2961 return;
2962 }
2963 if (value < min || value > max) {
2964 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2965 name ? name : "null", value, min, max);
2966 return;
2967 }
2968
2969 env->cpuid_version &= ~0xf;
2970 env->cpuid_version |= value & 0xf;
2971 }
2972
2973 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2974 {
2975 X86CPU *cpu = X86_CPU(obj);
2976 CPUX86State *env = &cpu->env;
2977 char *value;
2978
2979 value = g_malloc(CPUID_VENDOR_SZ + 1);
2980 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
2981 env->cpuid_vendor3);
2982 return value;
2983 }
2984
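/* The 12-character vendor string is packed four bytes per register, in
 * ascending byte order, into the EBX/EDX/ECX words reported by CPUID[0].
 */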
2985 static void x86_cpuid_set_vendor(Object *obj, const char *value,
2986 Error **errp)
2987 {
2988 X86CPU *cpu = X86_CPU(obj);
2989 CPUX86State *env = &cpu->env;
2990 int i;
2991
2992 if (strlen(value) != CPUID_VENDOR_SZ) {
2993 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
2994 return;
2995 }
2996
2997 env->cpuid_vendor1 = 0;
2998 env->cpuid_vendor2 = 0;
2999 env->cpuid_vendor3 = 0;
3000 for (i = 0; i < 4; i++) {
3001 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3002 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3003 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3004 }
3005 }
3006
3007 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3008 {
3009 X86CPU *cpu = X86_CPU(obj);
3010 CPUX86State *env = &cpu->env;
3011 char *value;
3012 int i;
3013
3014 value = g_malloc(48 + 1);
3015 for (i = 0; i < 48; i++) {
3016 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3017 }
3018 value[48] = '\0';
3019 return value;
3020 }
3021
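/* Pack the model ID string into the twelve 32-bit words returned by CPUID
 * leaves 0x80000002..0x80000004, padding with NUL bytes.
 */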
3022 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3023 Error **errp)
3024 {
3025 X86CPU *cpu = X86_CPU(obj);
3026 CPUX86State *env = &cpu->env;
3027 int c, len, i;
3028
3029 if (model_id == NULL) {
3030 model_id = "";
3031 }
3032 len = strlen(model_id);
3033 memset(env->cpuid_model, 0, 48);
3034 for (i = 0; i < 48; i++) {
3035 if (i >= len) {
3036 c = '\0';
3037 } else {
3038 c = (uint8_t)model_id[i];
3039 }
3040 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3041 }
3042 }
3043
3044 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3045 void *opaque, Error **errp)
3046 {
3047 X86CPU *cpu = X86_CPU(obj);
3048 int64_t value;
3049
3050 value = cpu->env.tsc_khz * 1000;
3051 visit_type_int(v, name, &value, errp);
3052 }
3053
3054 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3055 void *opaque, Error **errp)
3056 {
3057 X86CPU *cpu = X86_CPU(obj);
3058 const int64_t min = 0;
3059 const int64_t max = INT64_MAX;
3060 Error *local_err = NULL;
3061 int64_t value;
3062
3063 visit_type_int(v, name, &value, &local_err);
3064 if (local_err) {
3065 error_propagate(errp, local_err);
3066 return;
3067 }
3068 if (value < min || value > max) {
3069 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3070 name ? name : "null", value, min, max);
3071 return;
3072 }
3073
3074 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
3075 }
3076
3077 /* Generic getter for "feature-words" and "filtered-features" properties */
3078 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
3079 const char *name, void *opaque,
3080 Error **errp)
3081 {
3082 uint32_t *array = (uint32_t *)opaque;
3083 FeatureWord w;
3084 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
3085 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
3086 X86CPUFeatureWordInfoList *list = NULL;
3087
3088 for (w = 0; w < FEATURE_WORDS; w++) {
3089 FeatureWordInfo *wi = &feature_word_info[w];
3090 X86CPUFeatureWordInfo *qwi = &word_infos[w];
3091 qwi->cpuid_input_eax = wi->cpuid_eax;
3092 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
3093 qwi->cpuid_input_ecx = wi->cpuid_ecx;
3094 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
3095 qwi->features = array[w];
3096
3097 /* List will be in reverse order, but order shouldn't matter */
3098 list_entries[w].next = list;
3099 list_entries[w].value = &word_infos[w];
3100 list = &list_entries[w];
3101 }
3102
3103 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
3104 }
3105
3106 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3107 void *opaque, Error **errp)
3108 {
3109 X86CPU *cpu = X86_CPU(obj);
3110 int64_t value = cpu->hyperv_spinlock_attempts;
3111
3112 visit_type_int(v, name, &value, errp);
3113 }
3114
3115 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
3116 void *opaque, Error **errp)
3117 {
3118 const int64_t min = 0xFFF;
3119 const int64_t max = UINT_MAX;
3120 X86CPU *cpu = X86_CPU(obj);
3121 Error *err = NULL;
3122 int64_t value;
3123
3124 visit_type_int(v, name, &value, &err);
3125 if (err) {
3126 error_propagate(errp, err);
3127 return;
3128 }
3129
3130 if (value < min || value > max) {
3131 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
3132 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
3133 object_get_typename(obj), name ? name : "null",
3134 value, min, max);
3135 return;
3136 }
3137 cpu->hyperv_spinlock_attempts = value;
3138 }
3139
3140 static const PropertyInfo qdev_prop_spinlocks = {
3141 .name = "int",
3142 .get = x86_get_hv_spinlocks,
3143 .set = x86_set_hv_spinlocks,
3144 };
3145
3146 /* Convert all '_' in a feature string option name to '-', to make the feature
3147 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
3148 */
3149 static inline void feat2prop(char *s)
3150 {
3151 while ((s = strchr(s, '_'))) {
3152 *s = '-';
3153 }
3154 }
3155
3156 /* Return the feature property name for a feature flag bit */
3157 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
3158 {
3159 /* XSAVE components are automatically enabled by other features,
3160 * so return the original feature name instead
3161 */
3162 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
3163 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
3164
3165 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
3166 x86_ext_save_areas[comp].bits) {
3167 w = x86_ext_save_areas[comp].feature;
3168 bitnr = ctz32(x86_ext_save_areas[comp].bits);
3169 }
3170 }
3171
3172 assert(bitnr < 32);
3173 assert(w < FEATURE_WORDS);
3174 return feature_word_info[w].feat_names[bitnr];
3175 }
3176
3177 /* Compatibility hack to maintain the legacy +-feat semantics,
3178 * where +-feat overwrites any feature set by
3179 * feat=on|feat even if the latter is parsed after +-feat
3180 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
3181 */
3182 static GList *plus_features, *minus_features;
3183
3184 static gint compare_string(gconstpointer a, gconstpointer b)
3185 {
3186 return g_strcmp0(a, b);
3187 }
3188
3189 /* Parse "+feature,-feature,feature=foo" CPU feature string
3190 */
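/* e.g. "+aes,-x2apic,tsc-freq=2.5G" (illustrative example) */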
3191 static void x86_cpu_parse_featurestr(const char *typename, char *features,
3192 Error **errp)
3193 {
3194 char *featurestr; /* Single "key=value" string being parsed */
3195 static bool cpu_globals_initialized;
3196 bool ambiguous = false;
3197
3198 if (cpu_globals_initialized) {
3199 return;
3200 }
3201 cpu_globals_initialized = true;
3202
3203 if (!features) {
3204 return;
3205 }
3206
3207 for (featurestr = strtok(features, ",");
3208 featurestr;
3209 featurestr = strtok(NULL, ",")) {
3210 const char *name;
3211 const char *val = NULL;
3212 char *eq = NULL;
3213 char num[32];
3214 GlobalProperty *prop;
3215
3216 /* Compatibility syntax: */
3217 if (featurestr[0] == '+') {
3218 plus_features = g_list_append(plus_features,
3219 g_strdup(featurestr + 1));
3220 continue;
3221 } else if (featurestr[0] == '-') {
3222 minus_features = g_list_append(minus_features,
3223 g_strdup(featurestr + 1));
3224 continue;
3225 }
3226
3227 eq = strchr(featurestr, '=');
3228 if (eq) {
3229 *eq++ = 0;
3230 val = eq;
3231 } else {
3232 val = "on";
3233 }
3234
3235 feat2prop(featurestr);
3236 name = featurestr;
3237
3238 if (g_list_find_custom(plus_features, name, compare_string)) {
3239 warn_report("Ambiguous CPU model string. "
3240 "Don't mix both \"+%s\" and \"%s=%s\"",
3241 name, name, val);
3242 ambiguous = true;
3243 }
3244 if (g_list_find_custom(minus_features, name, compare_string)) {
3245 warn_report("Ambiguous CPU model string. "
3246 "Don't mix both \"-%s\" and \"%s=%s\"",
3247 name, name, val);
3248 ambiguous = true;
3249 }
3250
3251 /* Special case: */
3252 if (!strcmp(name, "tsc-freq")) {
3253 int ret;
3254 uint64_t tsc_freq;
3255
3256 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
3257 if (ret < 0 || tsc_freq > INT64_MAX) {
3258 error_setg(errp, "bad numerical value %s", val);
3259 return;
3260 }
3261 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
3262 val = num;
3263 name = "tsc-frequency";
3264 }
3265
3266 prop = g_new0(typeof(*prop), 1);
3267 prop->driver = typename;
3268 prop->property = g_strdup(name);
3269 prop->value = g_strdup(val);
3270 prop->errp = &error_fatal;
3271 qdev_prop_register_global(prop);
3272 }
3273
3274 if (ambiguous) {
3275 warn_report("Compatibility of ambiguous CPU model "
3276 "strings won't be kept on future QEMU versions");
3277 }
3278 }
3279
3280 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3281 static int x86_cpu_filter_features(X86CPU *cpu);
3282
3283 /* Check for missing features that may prevent the CPU class from
3284 * running using the current machine and accelerator.
3285 */
3286 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3287 strList **missing_feats)
3288 {
3289 X86CPU *xc;
3290 FeatureWord w;
3291 Error *err = NULL;
3292 strList **next = missing_feats;
3293
3294 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3295 strList *new = g_new0(strList, 1);
3296 new->value = g_strdup("kvm");
3297 *missing_feats = new;
3298 return;
3299 }
3300
3301 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3302
3303 x86_cpu_expand_features(xc, &err);
3304 if (err) {
3305 /* Errors at x86_cpu_expand_features should never happen,
3306 * but in case they do, just report the model as not
3307 * runnable at all using the "type" property.
3308 */
3309 strList *new = g_new0(strList, 1);
3310 new->value = g_strdup("type");
3311 *next = new;
3312 next = &new->next;
3313 }
3314
3315 x86_cpu_filter_features(xc);
3316
3317 for (w = 0; w < FEATURE_WORDS; w++) {
3318 uint32_t filtered = xc->filtered_features[w];
3319 int i;
3320 for (i = 0; i < 32; i++) {
3321 if (filtered & (1UL << i)) {
3322 strList *new = g_new0(strList, 1);
3323 new->value = g_strdup(x86_cpu_feature_name(w, i));
3324 *next = new;
3325 next = &new->next;
3326 }
3327 }
3328 }
3329
3330 object_unref(OBJECT(xc));
3331 }
3332
3333 /* Print all cpuid feature names in featureset
3334 */
3335 static void listflags(FILE *f, fprintf_function print, GList *features)
3336 {
3337 size_t len = 0;
3338 GList *tmp;
3339
3340 for (tmp = features; tmp; tmp = tmp->next) {
3341 const char *name = tmp->data;
3342 if ((len + strlen(name) + 1) >= 75) {
3343 print(f, "\n");
3344 len = 0;
3345 }
3346 print(f, "%s%s", len == 0 ? "  " : " ", name);
3347 len += strlen(name) + 1;
3348 }
3349 print(f, "\n");
3350 }
3351
3352 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3353 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3354 {
3355 ObjectClass *class_a = (ObjectClass *)a;
3356 ObjectClass *class_b = (ObjectClass *)b;
3357 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3358 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3359 char *name_a, *name_b;
3360 int ret;
3361
3362 if (cc_a->ordering != cc_b->ordering) {
3363 ret = cc_a->ordering - cc_b->ordering;
3364 } else {
3365 name_a = x86_cpu_class_get_model_name(cc_a);
3366 name_b = x86_cpu_class_get_model_name(cc_b);
3367 ret = strcmp(name_a, name_b);
3368 g_free(name_a);
3369 g_free(name_b);
3370 }
3371 return ret;
3372 }
3373
3374 static GSList *get_sorted_cpu_model_list(void)
3375 {
3376 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3377 list = g_slist_sort(list, x86_cpu_list_compare);
3378 return list;
3379 }
3380
3381 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3382 {
3383 ObjectClass *oc = data;
3384 X86CPUClass *cc = X86_CPU_CLASS(oc);
3385 CPUListState *s = user_data;
3386 char *name = x86_cpu_class_get_model_name(cc);
3387 const char *desc = cc->model_description;
3388 if (!desc && cc->cpu_def) {
3389 desc = cc->cpu_def->model_id;
3390 }
3391
3392 (*s->cpu_fprintf)(s->file, "x86 %-20s %-48s\n",
3393 name, desc);
3394 g_free(name);
3395 }
3396
3397 /* list available CPU models and flags */
3398 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3399 {
3400 int i, j;
3401 CPUListState s = {
3402 .file = f,
3403 .cpu_fprintf = cpu_fprintf,
3404 };
3405 GSList *list;
3406 GList *names = NULL;
3407
3408 (*cpu_fprintf)(f, "Available CPUs:\n");
3409 list = get_sorted_cpu_model_list();
3410 g_slist_foreach(list, x86_cpu_list_entry, &s);
3411 g_slist_free(list);
3412
3413 names = NULL;
3414 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3415 FeatureWordInfo *fw = &feature_word_info[i];
3416 for (j = 0; j < 32; j++) {
3417 if (fw->feat_names[j]) {
3418 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3419 }
3420 }
3421 }
3422
3423 names = g_list_sort(names, (GCompareFunc)strcmp);
3424
3425 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
3426 listflags(f, cpu_fprintf, names);
3427 (*cpu_fprintf)(f, "\n");
3428 g_list_free(names);
3429 }
3430
3431 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3432 {
3433 ObjectClass *oc = data;
3434 X86CPUClass *cc = X86_CPU_CLASS(oc);
3435 CpuDefinitionInfoList **cpu_list = user_data;
3436 CpuDefinitionInfoList *entry;
3437 CpuDefinitionInfo *info;
3438
3439 info = g_malloc0(sizeof(*info));
3440 info->name = x86_cpu_class_get_model_name(cc);
3441 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3442 info->has_unavailable_features = true;
3443 info->q_typename = g_strdup(object_class_get_name(oc));
3444 info->migration_safe = cc->migration_safe;
3445 info->has_migration_safe = true;
3446 info->q_static = cc->static_model;
3447
3448 entry = g_malloc0(sizeof(*entry));
3449 entry->value = info;
3450 entry->next = *cpu_list;
3451 *cpu_list = entry;
3452 }
3453
3454 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
3455 {
3456 CpuDefinitionInfoList *cpu_list = NULL;
3457 GSList *list = get_sorted_cpu_model_list();
3458 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3459 g_slist_free(list);
3460 return cpu_list;
3461 }
3462
3463 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3464 bool migratable_only)
3465 {
3466 FeatureWordInfo *wi = &feature_word_info[w];
3467 uint32_t r;
3468
3469 if (kvm_enabled()) {
3470 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
3471 wi->cpuid_ecx,
3472 wi->cpuid_reg);
3473 } else if (hvf_enabled()) {
3474 r = hvf_get_supported_cpuid(wi->cpuid_eax,
3475 wi->cpuid_ecx,
3476 wi->cpuid_reg);
3477 } else if (tcg_enabled()) {
3478 r = wi->tcg_features;
3479 } else {
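/* No accelerator-specific CPUID information is available here;
 * report all bits set so that nothing gets filtered out.
 */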
3480 return ~0;
3481 }
3482 if (migratable_only) {
3483 r &= x86_cpu_get_migratable_flags(w);
3484 }
3485 return r;
3486 }
3487
3488 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3489 {
3490 FeatureWord w;
3491
3492 for (w = 0; w < FEATURE_WORDS; w++) {
3493 report_unavailable_features(w, cpu->filtered_features[w]);
3494 }
3495 }
3496
3497 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3498 {
3499 PropValue *pv;
3500 for (pv = props; pv->prop; pv++) {
3501 if (!pv->value) {
3502 continue;
3503 }
3504 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3505 &error_abort);
3506 }
3507 }
3508
3509 /* Load data from X86CPUDefinition into an X86CPU object
3510 */
3511 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
3512 {
3513 CPUX86State *env = &cpu->env;
3514 const char *vendor;
3515 char host_vendor[CPUID_VENDOR_SZ + 1];
3516 FeatureWord w;
3517
3518 /*NOTE: any property set by this function should be returned by
3519 * x86_cpu_static_props(), so static expansion of
3520 * query-cpu-model-expansion is always complete.
3521 */
3522
3523 /* CPU models only set _minimum_ values for level/xlevel: */
3524 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
3525 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
3526
3527 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
3528 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
3529 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
3530 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
3531 for (w = 0; w < FEATURE_WORDS; w++) {
3532 env->features[w] = def->features[w];
3533 }
3534
3535 /* legacy-cache defaults to 'off' if CPU model provides cache info */
3536 cpu->legacy_cache = !def->cache_info;
3537
3538 /* Special cases not set in the X86CPUDefinition structs: */
3539 /* TODO: in-kernel irqchip for hvf */
3540 if (kvm_enabled()) {
3541 if (!kvm_irqchip_in_kernel()) {
3542 x86_cpu_change_kvm_default("x2apic", "off");
3543 }
3544
3545 x86_cpu_apply_props(cpu, kvm_default_props);
3546 } else if (tcg_enabled()) {
3547 x86_cpu_apply_props(cpu, tcg_default_props);
3548 }
3549
3550 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
3551
3552 /* sysenter isn't supported in compatibility mode on AMD,
3553 * syscall isn't supported in compatibility mode on Intel.
3554 * Normally we advertise the actual CPU vendor, but you can
3555 * override this using the 'vendor' property if you want to use
3556 * KVM's sysenter/syscall emulation in compatibility mode and
3557 * when doing cross vendor migration
3558 */
3559 vendor = def->vendor;
3560 if (accel_uses_host_cpuid()) {
3561 uint32_t ebx = 0, ecx = 0, edx = 0;
3562 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
3563 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
3564 vendor = host_vendor;
3565 }
3566
3567 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
3568
3569 }
3570
3571 /* Return a QDict containing keys for all properties that can be included
3572 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3573 * must be included in the dictionary.
3574 */
3575 static QDict *x86_cpu_static_props(void)
3576 {
3577 FeatureWord w;
3578 int i;
3579 static const char *props[] = {
3580 "min-level",
3581 "min-xlevel",
3582 "family",
3583 "model",
3584 "stepping",
3585 "model-id",
3586 "vendor",
3587 "lmce",
3588 NULL,
3589 };
3590 static QDict *d;
3591
3592 if (d) {
3593 return d;
3594 }
3595
3596 d = qdict_new();
3597 for (i = 0; props[i]; i++) {
3598 qdict_put_null(d, props[i]);
3599 }
3600
3601 for (w = 0; w < FEATURE_WORDS; w++) {
3602 FeatureWordInfo *fi = &feature_word_info[w];
3603 int bit;
3604 for (bit = 0; bit < 32; bit++) {
3605 if (!fi->feat_names[bit]) {
3606 continue;
3607 }
3608 qdict_put_null(d, fi->feat_names[bit]);
3609 }
3610 }
3611
3612 return d;
3613 }
3614
3615 /* Add an entry to the @props dict with the value of property @prop. */
3616 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3617 {
3618 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3619 &error_abort);
3620
3621 qdict_put_obj(props, prop, value);
3622 }
3623
3624 /* Convert CPU model data from X86CPU object to a property dictionary
3625 * that can recreate exactly the same CPU model.
3626 */
3627 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3628 {
3629 QDict *sprops = x86_cpu_static_props();
3630 const QDictEntry *e;
3631
3632 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3633 const char *prop = qdict_entry_key(e);
3634 x86_cpu_expand_prop(cpu, props, prop);
3635 }
3636 }
3637
3638 /* Convert CPU model data from X86CPU object to a property dictionary
3639 * that can recreate exactly the same CPU model, including every
3640 * writeable QOM property.
3641 */
3642 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3643 {
3644 ObjectPropertyIterator iter;
3645 ObjectProperty *prop;
3646
3647 object_property_iter_init(&iter, OBJECT(cpu));
3648 while ((prop = object_property_iter_next(&iter))) {
3649 /* skip read-only or write-only properties */
3650 if (!prop->get || !prop->set) {
3651 continue;
3652 }
3653
3654 /* "hotplugged" is the only property that is configurable
3655 * on the command-line but will be set differently on CPUs
3656 * created using "-cpu ... -smp ..." and by CPUs created
3657 * on the fly by x86_cpu_from_model() for querying. Skip it.
3658 */
3659 if (!strcmp(prop->name, "hotplugged")) {
3660 continue;
3661 }
3662 x86_cpu_expand_prop(cpu, props, prop->name);
3663 }
3664 }
3665
3666 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3667 {
3668 const QDictEntry *prop;
3669 Error *err = NULL;
3670
3671 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3672 object_property_set_qobject(obj, qdict_entry_value(prop),
3673 qdict_entry_key(prop), &err);
3674 if (err) {
3675 break;
3676 }
3677 }
3678
3679 error_propagate(errp, err);
3680 }
3681
3682 /* Create X86CPU object according to model+props specification */
3683 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3684 {
3685 X86CPU *xc = NULL;
3686 X86CPUClass *xcc;
3687 Error *err = NULL;
3688
3689 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3690 if (xcc == NULL) {
3691 error_setg(&err, "CPU model '%s' not found", model);
3692 goto out;
3693 }
3694
3695 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3696 if (props) {
3697 object_apply_props(OBJECT(xc), props, &err);
3698 if (err) {
3699 goto out;
3700 }
3701 }
3702
3703 x86_cpu_expand_features(xc, &err);
3704 if (err) {
3705 goto out;
3706 }
3707
3708 out:
3709 if (err) {
3710 error_propagate(errp, err);
3711 object_unref(OBJECT(xc));
3712 xc = NULL;
3713 }
3714 return xc;
3715 }
3716
3717 CpuModelExpansionInfo *
3718 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3719 CpuModelInfo *model,
3720 Error **errp)
3721 {
3722 X86CPU *xc = NULL;
3723 Error *err = NULL;
3724 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3725 QDict *props = NULL;
3726 const char *base_name;
3727
3728 xc = x86_cpu_from_model(model->name,
3729 model->has_props ?
3730 qobject_to(QDict, model->props) :
3731 NULL, &err);
3732 if (err) {
3733 goto out;
3734 }
3735
3736 props = qdict_new();
3737
3738 switch (type) {
3739 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3740 /* Static expansion will be based on "base" only */
3741 base_name = "base";
3742 x86_cpu_to_dict(xc, props);
3743 break;
3744 case CPU_MODEL_EXPANSION_TYPE_FULL:
3745 /* As we don't return every single property, full expansion needs
3746 * to keep the original model name+props, and add extra
3747 * properties on top of that.
3748 */
3749 base_name = model->name;
3750 x86_cpu_to_dict_full(xc, props);
3751 break;
3752 default:
3753 error_setg(&err, "Unsupported expansion type");
3754 goto out;
3755 }
3756
3757 if (!props) {
3758 props = qdict_new();
3759 }
3760 x86_cpu_to_dict(xc, props);
3761
3762 ret->model = g_new0(CpuModelInfo, 1);
3763 ret->model->name = g_strdup(base_name);
3764 ret->model->props = QOBJECT(props);
3765 ret->model->has_props = true;
3766
3767 out:
3768 object_unref(OBJECT(xc));
3769 if (err) {
3770 error_propagate(errp, err);
3771 qapi_free_CpuModelExpansionInfo(ret);
3772 ret = NULL;
3773 }
3774 return ret;
3775 }
3776
3777 static gchar *x86_gdb_arch_name(CPUState *cs)
3778 {
3779 #ifdef TARGET_X86_64
3780 return g_strdup("i386:x86-64");
3781 #else
3782 return g_strdup("i386");
3783 #endif
3784 }
3785
3786 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3787 {
3788 X86CPUDefinition *cpudef = data;
3789 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3790
3791 xcc->cpu_def = cpudef;
3792 xcc->migration_safe = true;
3793 }
3794
3795 static void x86_register_cpudef_type(X86CPUDefinition *def)
3796 {
3797 char *typename = x86_cpu_type_name(def->name);
3798 TypeInfo ti = {
3799 .name = typename,
3800 .parent = TYPE_X86_CPU,
3801 .class_init = x86_cpu_cpudef_class_init,
3802 .class_data = def,
3803 };
3804
3805 /* AMD aliases are handled at runtime based on CPUID vendor, so
3806 * they shouldn't be set on the CPU model table.
3807 */
3808 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3809 /* catch mistakes instead of silently truncating model_id when too long */
3810 assert(def->model_id && strlen(def->model_id) <= 48);
3811
3812
3813 type_register(&ti);
3814 g_free(typename);
3815 }
3816
3817 #if !defined(CONFIG_USER_ONLY)
3818
3819 void cpu_clear_apic_feature(CPUX86State *env)
3820 {
3821 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3822 }
3823
3824 #endif /* !CONFIG_USER_ONLY */
3825
3826 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3827 uint32_t *eax, uint32_t *ebx,
3828 uint32_t *ecx, uint32_t *edx)
3829 {
3830 X86CPU *cpu = x86_env_get_cpu(env);
3831 CPUState *cs = CPU(cpu);
3832 uint32_t pkg_offset;
3833 uint32_t limit;
3834 uint32_t signature[3];
3835
3836 /* Calculate & apply limits for different index ranges */
3837 if (index >= 0xC0000000) {
3838 limit = env->cpuid_xlevel2;
3839 } else if (index >= 0x80000000) {
3840 limit = env->cpuid_xlevel;
3841 } else if (index >= 0x40000000) {
3842 limit = 0x40000001;
3843 } else {
3844 limit = env->cpuid_level;
3845 }
3846
3847 if (index > limit) {
3848 /* Intel documentation states that invalid EAX input will
3849 * return the same information as EAX=cpuid_level
3850 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3851 */
3852 index = env->cpuid_level;
3853 }
3854
3855 switch(index) {
3856 case 0:
3857 *eax = env->cpuid_level;
3858 *ebx = env->cpuid_vendor1;
3859 *edx = env->cpuid_vendor2;
3860 *ecx = env->cpuid_vendor3;
3861 break;
3862 case 1:
3863 *eax = env->cpuid_version;
3864 *ebx = (cpu->apic_id << 24) |
3865 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3866 *ecx = env->features[FEAT_1_ECX];
3867 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3868 *ecx |= CPUID_EXT_OSXSAVE;
3869 }
3870 *edx = env->features[FEAT_1_EDX];
3871 if (cs->nr_cores * cs->nr_threads > 1) {
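/* EBX[23:16]: maximum number of addressable logical processors in this
 * package.
 */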
3872 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3873 *edx |= CPUID_HT;
3874 }
3875 break;
3876 case 2:
3877 /* cache info: needed for Pentium Pro compatibility */
3878 if (cpu->cache_info_passthrough) {
3879 host_cpuid(index, 0, eax, ebx, ecx, edx);
3880 break;
3881 }
3882 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3883 *ebx = 0;
3884 if (!cpu->enable_l3_cache) {
3885 *ecx = 0;
3886 } else {
3887 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
3888 }
3889 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
3890 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
3891 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
3892 break;
3893 case 4:
3894 /* cache info: needed for Core compatibility */
3895 if (cpu->cache_info_passthrough) {
3896 host_cpuid(index, count, eax, ebx, ecx, edx);
3897 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3898 *eax &= ~0xFC000000;
3899 if ((*eax & 31) && cs->nr_cores > 1) {
3900 *eax |= (cs->nr_cores - 1) << 26;
3901 }
3902 } else {
3903 *eax = 0;
3904 switch (count) {
3905 case 0: /* L1 dcache info */
3906 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
3907 1, cs->nr_cores,
3908 eax, ebx, ecx, edx);
3909 break;
3910 case 1: /* L1 icache info */
3911 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
3912 1, cs->nr_cores,
3913 eax, ebx, ecx, edx);
3914 break;
3915 case 2: /* L2 cache info */
3916 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
3917 cs->nr_threads, cs->nr_cores,
3918 eax, ebx, ecx, edx);
3919 break;
3920 case 3: /* L3 cache info */
3921 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3922 if (cpu->enable_l3_cache) {
3923 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
3924 (1 << pkg_offset), cs->nr_cores,
3925 eax, ebx, ecx, edx);
3926 break;
3927 }
3928 /* fall through */
3929 default: /* end of info */
3930 *eax = *ebx = *ecx = *edx = 0;
3931 break;
3932 }
3933 }
3934 break;
3935 case 5:
3936 /* mwait info: needed for Core compatibility */
3937 *eax = 0; /* Smallest monitor-line size in bytes */
3938 *ebx = 0; /* Largest monitor-line size in bytes */
3939 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
3940 *edx = 0;
3941 break;
3942 case 6:
3943 /* Thermal and Power Leaf */
3944 *eax = env->features[FEAT_6_EAX];
3945 *ebx = 0;
3946 *ecx = 0;
3947 *edx = 0;
3948 break;
3949 case 7:
3950 /* Structured Extended Feature Flags Enumeration Leaf */
3951 if (count == 0) {
3952 *eax = 0; /* Maximum ECX value for sub-leaves */
3953 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3954 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
3955 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3956 *ecx |= CPUID_7_0_ECX_OSPKE;
3957 }
3958 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3959 } else {
3960 *eax = 0;
3961 *ebx = 0;
3962 *ecx = 0;
3963 *edx = 0;
3964 }
3965 break;
3966 case 9:
3967 /* Direct Cache Access Information Leaf */
3968 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3969 *ebx = 0;
3970 *ecx = 0;
3971 *edx = 0;
3972 break;
3973 case 0xA:
3974 /* Architectural Performance Monitoring Leaf */
3975 if (kvm_enabled() && cpu->enable_pmu) {
3976 KVMState *s = cs->kvm_state;
3977
3978 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
3979 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
3980 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
3981 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
3982 } else if (hvf_enabled() && cpu->enable_pmu) {
3983 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
3984 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
3985 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
3986 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
3987 } else {
3988 *eax = 0;
3989 *ebx = 0;
3990 *ecx = 0;
3991 *edx = 0;
3992 }
3993 break;
3994 case 0xB:
3995 /* Extended Topology Enumeration Leaf */
3996 if (!cpu->enable_cpuid_0xb) {
3997 *eax = *ebx = *ecx = *edx = 0;
3998 break;
3999 }
4000
4001 *ecx = count & 0xff;
4002 *edx = cpu->apic_id;
4003
4004 switch (count) {
4005 case 0:
4006 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
4007 *ebx = cs->nr_threads;
4008 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
4009 break;
4010 case 1:
4011 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
4012 *ebx = cs->nr_cores * cs->nr_threads;
4013 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
4014 break;
4015 default:
4016 *eax = 0;
4017 *ebx = 0;
4018 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
4019 }
4020
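/* CPUID[0xB].EAX[4:0] is the number of bits to shift the x2APIC ID right
 * by at this level; it must fit in 5 bits, hence the assertion.
 */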
4021 assert(!(*eax & ~0x1f));
4022 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
4023 break;
4024 case 0xD: {
4025 /* Processor Extended State */
4026 *eax = 0;
4027 *ebx = 0;
4028 *ecx = 0;
4029 *edx = 0;
4030 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4031 break;
4032 }
4033
4034 if (count == 0) {
4035 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
4036 *eax = env->features[FEAT_XSAVE_COMP_LO];
4037 *edx = env->features[FEAT_XSAVE_COMP_HI];
4038 *ebx = *ecx;
4039 } else if (count == 1) {
4040 *eax = env->features[FEAT_XSAVE];
4041 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
4042 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
4043 const ExtSaveArea *esa = &x86_ext_save_areas[count];
4044 *eax = esa->size;
4045 *ebx = esa->offset;
4046 }
4047 }
4048 break;
4049 }
4050 case 0x14: {
4051 /* Intel Processor Trace Enumeration */
4052 *eax = 0;
4053 *ebx = 0;
4054 *ecx = 0;
4055 *edx = 0;
4056 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
4057 !kvm_enabled()) {
4058 break;
4059 }
4060
4061 if (count == 0) {
4062 *eax = INTEL_PT_MAX_SUBLEAF;
4063 *ebx = INTEL_PT_MINIMAL_EBX;
4064 *ecx = INTEL_PT_MINIMAL_ECX;
4065 } else if (count == 1) {
4066 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
4067 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
4068 }
4069 break;
4070 }
4071 case 0x40000000:
4072 /*
4073 * CPUID code in kvm_arch_init_vcpu() ignores stuff
4074 * set here, but we restrict to TCG nonetheless.
4075 */
4076 if (tcg_enabled() && cpu->expose_tcg) {
4077 memcpy(signature, "TCGTCGTCGTCG", 12);
4078 *eax = 0x40000001;
4079 *ebx = signature[0];
4080 *ecx = signature[1];
4081 *edx = signature[2];
4082 } else {
4083 *eax = 0;
4084 *ebx = 0;
4085 *ecx = 0;
4086 *edx = 0;
4087 }
4088 break;
4089 case 0x40000001:
4090 *eax = 0;
4091 *ebx = 0;
4092 *ecx = 0;
4093 *edx = 0;
4094 break;
4095 case 0x80000000:
4096 *eax = env->cpuid_xlevel;
4097 *ebx = env->cpuid_vendor1;
4098 *edx = env->cpuid_vendor2;
4099 *ecx = env->cpuid_vendor3;
4100 break;
4101 case 0x80000001:
4102 *eax = env->cpuid_version;
4103 *ebx = 0;
4104 *ecx = env->features[FEAT_8000_0001_ECX];
4105 *edx = env->features[FEAT_8000_0001_EDX];
4106
4107 /* The Linux kernel checks for the CMPLegacy bit and
4108 * discards multiple thread information if it is set.
4109 * So don't set it here for Intel to make Linux guests happy.
4110 */
4111 if (cs->nr_cores * cs->nr_threads > 1) {
4112 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
4113 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
4114 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
4115 *ecx |= 1 << 1; /* CmpLegacy bit */
4116 }
4117 }
4118 break;
4119 case 0x80000002:
4120 case 0x80000003:
4121 case 0x80000004:
4122 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
4123 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
4124 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
4125 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
4126 break;
4127 case 0x80000005:
4128 /* cache info (L1 cache) */
4129 if (cpu->cache_info_passthrough) {
4130 host_cpuid(index, 0, eax, ebx, ecx, edx);
4131 break;
4132 }
4133 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
4134 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
4135 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
4136 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
4137 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
4138 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
4139 break;
4140 case 0x80000006:
4141 /* cache info (L2 cache) */
4142 if (cpu->cache_info_passthrough) {
4143 host_cpuid(index, 0, eax, ebx, ecx, edx);
4144 break;
4145 }
4146 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
4147 (L2_DTLB_2M_ENTRIES << 16) | \
4148 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
4149 (L2_ITLB_2M_ENTRIES);
4150 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
4151 (L2_DTLB_4K_ENTRIES << 16) | \
4152 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
4153 (L2_ITLB_4K_ENTRIES);
4154 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
4155 cpu->enable_l3_cache ?
4156 env->cache_info_amd.l3_cache : NULL,
4157 ecx, edx);
4158 break;
4159 case 0x80000007:
4160 *eax = 0;
4161 *ebx = 0;
4162 *ecx = 0;
4163 *edx = env->features[FEAT_8000_0007_EDX];
4164 break;
4165 case 0x80000008:
4166 /* virtual & phys address size in low 2 bytes. */
4167 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4168 /* 64 bit processor */
4169 *eax = cpu->phys_bits; /* configurable physical bits */
4170 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
4171 *eax |= 0x00003900; /* 57 bits virtual */
4172 } else {
4173 *eax |= 0x00003000; /* 48 bits virtual */
4174 }
4175 } else {
4176 *eax = cpu->phys_bits;
4177 }
4178 *ebx = env->features[FEAT_8000_0008_EBX];
4179 *ecx = 0;
4180 *edx = 0;
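/* ECX[7:0]: number of logical processors in the package, minus one. */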
4181 if (cs->nr_cores * cs->nr_threads > 1) {
4182 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
4183 }
4184 break;
4185 case 0x8000000A:
4186 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4187 *eax = 0x00000001; /* SVM Revision */
4188 *ebx = 0x00000010; /* nr of ASIDs */
4189 *ecx = 0;
4190 *edx = env->features[FEAT_SVM]; /* optional features */
4191 } else {
4192 *eax = 0;
4193 *ebx = 0;
4194 *ecx = 0;
4195 *edx = 0;
4196 }
4197 break;
4198 case 0x8000001D:
4199 *eax = 0;
4200 switch (count) {
4201 case 0: /* L1 dcache info */
4202 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
4203 eax, ebx, ecx, edx);
4204 break;
4205 case 1: /* L1 icache info */
4206 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
4207 eax, ebx, ecx, edx);
4208 break;
4209 case 2: /* L2 cache info */
4210 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
4211 eax, ebx, ecx, edx);
4212 break;
4213 case 3: /* L3 cache info */
4214 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
4215 eax, ebx, ecx, edx);
4216 break;
4217 default: /* end of info */
4218 *eax = *ebx = *ecx = *edx = 0;
4219 break;
4220 }
4221 break;
4222 case 0x8000001E:
4223 assert(cpu->core_id <= 255);
4224 encode_topo_cpuid8000001e(cs, cpu,
4225 eax, ebx, ecx, edx);
4226 break;
4227 case 0xC0000000:
4228 *eax = env->cpuid_xlevel2;
4229 *ebx = 0;
4230 *ecx = 0;
4231 *edx = 0;
4232 break;
4233 case 0xC0000001:
4234 /* Support for VIA CPU's CPUID instruction */
4235 *eax = env->cpuid_version;
4236 *ebx = 0;
4237 *ecx = 0;
4238 *edx = env->features[FEAT_C000_0001_EDX];
4239 break;
4240 case 0xC0000002:
4241 case 0xC0000003:
4242 case 0xC0000004:
4243 /* Reserved for future use; currently filled with zeros */
4244 *eax = 0;
4245 *ebx = 0;
4246 *ecx = 0;
4247 *edx = 0;
4248 break;
4249 case 0x8000001F:
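/* AMD Secure Encrypted Virtualization (SEV): EAX bit 1 reports SEV
 * support, EBX[5:0] the C-bit position in page table entries, and
 * EBX[11:6] the physical address bit reduction.
 */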
4250 *eax = sev_enabled() ? 0x2 : 0;
4251 *ebx = sev_get_cbit_position();
4252 *ebx |= sev_get_reduced_phys_bits() << 6;
4253 *ecx = 0;
4254 *edx = 0;
4255 break;
4256 default:
4257 /* reserved values: zero */
4258 *eax = 0;
4259 *ebx = 0;
4260 *ecx = 0;
4261 *edx = 0;
4262 break;
4263 }
4264 }
4265
4266 /* CPUClass::reset() */
4267 static void x86_cpu_reset(CPUState *s)
4268 {
4269 X86CPU *cpu = X86_CPU(s);
4270 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
4271 CPUX86State *env = &cpu->env;
4272 target_ulong cr4;
4273 uint64_t xcr0;
4274 int i;
4275
4276 xcc->parent_reset(s);
4277
4278 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
4279
4280 env->old_exception = -1;
4281
4282 /* init to reset state */
4283
4284 env->hflags2 |= HF2_GIF_MASK;
4285
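/* CR0 architectural reset value: ET, CD and NW set; paging and
 * protected mode disabled.
 */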
4286 cpu_x86_update_cr0(env, 0x60000010);
4287 env->a20_mask = ~0x0;
4288 env->smbase = 0x30000;
4289 env->msr_smi_count = 0;
4290
4291 env->idt.limit = 0xffff;
4292 env->gdt.limit = 0xffff;
4293 env->ldt.limit = 0xffff;
4294 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
4295 env->tr.limit = 0xffff;
4296 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
4297
4298 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
4299 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
4300 DESC_R_MASK | DESC_A_MASK);
4301 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
4302 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4303 DESC_A_MASK);
4304 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
4305 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4306 DESC_A_MASK);
4307 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
4308 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4309 DESC_A_MASK);
4310 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
4311 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4312 DESC_A_MASK);
4313 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
4314 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4315 DESC_A_MASK);
4316
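/* With the CS base of 0xffff0000 loaded above, EIP 0xfff0 puts the first
 * fetch at the architectural reset vector, 0xfffffff0.
 */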
4317 env->eip = 0xfff0;
4318 env->regs[R_EDX] = env->cpuid_version;
4319
4320 env->eflags = 0x2;
4321
4322 /* FPU init */
4323 for (i = 0; i < 8; i++) {
4324 env->fptags[i] = 1;
4325 }
4326 cpu_set_fpuc(env, 0x37f);
4327
4328 env->mxcsr = 0x1f80;
4329 /* All units are in INIT state. */
4330 env->xstate_bv = 0;
4331
4332 env->pat = 0x0007040600070406ULL;
4333 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
4334
4335 memset(env->dr, 0, sizeof(env->dr));
4336 env->dr[6] = DR6_FIXED_1;
4337 env->dr[7] = DR7_FIXED_1;
4338 cpu_breakpoint_remove_all(s, BP_CPU);
4339 cpu_watchpoint_remove_all(s, BP_CPU);
4340
4341 cr4 = 0;
4342 xcr0 = XSTATE_FP_MASK;
4343
4344 #ifdef CONFIG_USER_ONLY
4345 /* Enable all the features for user-mode. */
4346 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4347 xcr0 |= XSTATE_SSE_MASK;
4348 }
4349 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4350 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4351 if (env->features[esa->feature] & esa->bits) {
4352 xcr0 |= 1ull << i;
4353 }
4354 }
4355
4356 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
4357 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
4358 }
4359 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
4360 cr4 |= CR4_FSGSBASE_MASK;
4361 }
4362 #endif
4363
4364 env->xcr0 = xcr0;
4365 cpu_x86_update_cr4(env, cr4);
4366
4367 /*
4368 * SDM 11.11.5 requires:
4369 * - IA32_MTRR_DEF_TYPE MSR.E = 0
4370 * - IA32_MTRR_PHYSMASKn.V = 0
4371 * All other bits are undefined. For simplification, zero it all.
4372 */
4373 env->mtrr_deftype = 0;
4374 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
4375 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
4376
4377 env->interrupt_injected = -1;
4378 env->exception_injected = -1;
4379 env->nmi_injected = false;
4380 #if !defined(CONFIG_USER_ONLY)
4381 /* We hard-wire the BSP to the first CPU. */
4382 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
4383
4384 s->halted = !cpu_is_bsp(cpu);
4385
4386 if (kvm_enabled()) {
4387 kvm_arch_reset_vcpu(cpu);
4388 } else if (hvf_enabled()) {
4389 hvf_reset_vcpu(s);
4390 }
4392 #endif
4393 }
4394
4395 #ifndef CONFIG_USER_ONLY
4396 bool cpu_is_bsp(X86CPU *cpu)
4397 {
4398 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4399 }
4400
4401 /* TODO: remove me, when reset over QOM tree is implemented */
4402 static void x86_cpu_machine_reset_cb(void *opaque)
4403 {
4404 X86CPU *cpu = opaque;
4405 cpu_reset(CPU(cpu));
4406 }
4407 #endif
4408
4409 static void mce_init(X86CPU *cpu)
4410 {
4411 CPUX86State *cenv = &cpu->env;
4412 unsigned int bank;
4413
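/* MCA is only advertised for family >= 6 CPUs that expose both the MCE
 * and MCA bits in CPUID[1].EDX; each bank's MCi_CTL is then set to all
 * ones (all errors enabled).
 */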
4414 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4415 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4416 (CPUID_MCE | CPUID_MCA)) {
4417 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4418 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4419 cenv->mcg_ctl = ~(uint64_t)0;
4420 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4421 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4422 }
4423 }
4424 }
4425
4426 #ifndef CONFIG_USER_ONLY
4427 APICCommonClass *apic_get_class(void)
4428 {
4429 const char *apic_type = "apic";
4430
4431 /* TODO: in-kernel irqchip for hvf */
4432 if (kvm_apic_in_kernel()) {
4433 apic_type = "kvm-apic";
4434 } else if (xen_enabled()) {
4435 apic_type = "xen-apic";
4436 }
4437
4438 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4439 }
4440
4441 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
4442 {
4443 APICCommonState *apic;
4444 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
4445
4446 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
4447
4448 object_property_add_child(OBJECT(cpu), "lapic",
4449 OBJECT(cpu->apic_state), &error_abort);
4450 object_unref(OBJECT(cpu->apic_state));
4451
4452 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
4453 /* TODO: convert to link<> */
4454 apic = APIC_COMMON(cpu->apic_state);
4455 apic->cpu = cpu;
4456 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
4457 }
4458
4459 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4460 {
4461 APICCommonState *apic;
4462 static bool apic_mmio_map_once;
4463
4464 if (cpu->apic_state == NULL) {
4465 return;
4466 }
4467 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
4468 errp);
4469
4470 /* Map APIC MMIO area */
4471 apic = APIC_COMMON(cpu->apic_state);
4472 if (!apic_mmio_map_once) {
4473 memory_region_add_subregion_overlap(get_system_memory(),
4474 apic->apicbase &
4475 MSR_IA32_APICBASE_BASE,
4476 &apic->io_memory,
4477 0x1000);
4478 apic_mmio_map_once = true;
4479 }
4480 }
4481
4482 static void x86_cpu_machine_done(Notifier *n, void *unused)
4483 {
4484 X86CPU *cpu = container_of(n, X86CPU, machine_done);
4485 MemoryRegion *smram =
4486 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4487
4488 if (smram) {
4489 cpu->smram = g_new(MemoryRegion, 1);
4490 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4491 smram, 0, 1ull << 32);
4492 memory_region_set_enabled(cpu->smram, true);
4493 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
4494 }
4495 }
4496 #else
4497 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4498 {
4499 }
4500 #endif
4501
4502 /* Note: Only safe for use on x86(-64) hosts */
4503 static uint32_t x86_host_phys_bits(void)
4504 {
4505 uint32_t eax;
4506 uint32_t host_phys_bits;
4507
4508 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4509 if (eax >= 0x80000008) {
4510 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4511 /* Note: According to AMD doc 25481 rev 2.34 they have a field
4512 * at 23:16 that can specify the maximum physical address bits for
4513 * the guest, overriding this value; but I've not seen
4514 * anything with that set.
4515 */
4516 host_phys_bits = eax & 0xff;
4517 } else {
4518 /* It's an odd 64-bit machine that doesn't have the leaf for
4519 * physical address bits; fall back to 36, which matches most
4520 * older Intel CPUs.
4521 */
4522 host_phys_bits = 36;
4523 }
4524
4525 return host_phys_bits;
4526 }
4527
4528 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4529 {
4530 if (*min < value) {
4531 *min = value;
4532 }
4533 }
4534
4535 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4536 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4537 {
4538 CPUX86State *env = &cpu->env;
4539 FeatureWordInfo *fi = &feature_word_info[w];
4540 uint32_t eax = fi->cpuid_eax;
4541 uint32_t region = eax & 0xF0000000;
4542
4543 if (!env->features[w]) {
4544 return;
4545 }
4546
4547 switch (region) {
4548 case 0x00000000:
4549 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4550 break;
4551 case 0x80000000:
4552 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4553 break;
4554 case 0xC0000000:
4555 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4556 break;
4557 }
4558 }
4559
4560 /* Calculate XSAVE components based on the configured CPU feature flags */
4561 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4562 {
4563 CPUX86State *env = &cpu->env;
4564 int i;
4565 uint64_t mask;
4566
4567 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4568 return;
4569 }
4570
4571 mask = 0;
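/* Build the supported-components mask reported in CPUID[0xD].EAX:EDX:
 * component i is included iff the feature flag that owns it is set.
 */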
4572 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4573 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4574 if (env->features[esa->feature] & esa->bits) {
4575 mask |= (1ULL << i);
4576 }
4577 }
4578
4579 env->features[FEAT_XSAVE_COMP_LO] = mask;
4580 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4581 }
4582
4583 /***** Steps involved in loading and filtering CPUID data
4584 *
4585 * When initializing and realizing a CPU object, the steps
4586 * involved in setting up CPUID data are:
4587 *
4588 * 1) Loading CPU model definition (X86CPUDefinition). This is
4589 * implemented by x86_cpu_load_def() and should be completely
4590 * transparent, as it is done automatically by instance_init.
4591 * No code should need to look at X86CPUDefinition structs
4592 * outside instance_init.
4593 *
4594 * 2) CPU expansion. This is done by realize before CPUID
4595 * filtering, and will make sure host/accelerator data is
4596 * loaded for CPU models that depend on host capabilities
4597 * (e.g. "host"). Done by x86_cpu_expand_features().
4598 *
4599 * 3) CPUID filtering. This initializes extra data related to
4600 * CPUID, and checks if the host supports all capabilities
4601 * required by the CPU. Runnability of a CPU model is
4602 * determined at this step. Done by x86_cpu_filter_features().
4603 *
4604 * Some operations don't require all steps to be performed.
4605 * More precisely:
4606 *
4607 * - CPU instance creation (instance_init) will run only CPU
4608 * model loading. CPU expansion can't run at instance_init-time
4609 * because host/accelerator data may be not available yet.
4610 * - CPU realization will perform both CPU model expansion and CPUID
4611 * filtering, and return an error in case one of them fails.
4612 * - query-cpu-definitions needs to run all 3 steps. It needs
4613 * to run CPUID filtering, as the 'unavailable-features'
4614 * field is set based on the filtering results.
4615 * - The query-cpu-model-expansion QMP command only needs to run
4616 * CPU model loading and CPU expansion. It should not filter
4617 * any CPUID data based on host capabilities.
4618 */
4619
4620 /* Expand CPU configuration data, based on configured features
4621 * and host/accelerator capabilities when appropriate.
4622 */
4623 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4624 {
4625 CPUX86State *env = &cpu->env;
4626 FeatureWord w;
4627 GList *l;
4628 Error *local_err = NULL;
4629
4630 /*TODO: Now cpu->max_features doesn't overwrite features
4631 * set using QOM properties, and we can convert
4632 * plus_features & minus_features to global properties
4633 * inside x86_cpu_parse_featurestr() too.
4634 */
4635 if (cpu->max_features) {
4636 for (w = 0; w < FEATURE_WORDS; w++) {
4637 /* Override only features that weren't set explicitly
4638 * by the user.
4639 */
4640 env->features[w] |=
4641 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4642 ~env->user_features[w] & \
4643 ~feature_word_info[w].no_autoenable_flags;
4644 }
4645 }
4646
4647 for (l = plus_features; l; l = l->next) {
4648 const char *prop = l->data;
4649 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4650 if (local_err) {
4651 goto out;
4652 }
4653 }
4654
4655 for (l = minus_features; l; l = l->next) {
4656 const char *prop = l->data;
4657 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4658 if (local_err) {
4659 goto out;
4660 }
4661 }
4662
4663 if (!kvm_enabled() || !cpu->expose_kvm) {
4664 env->features[FEAT_KVM] = 0;
4665 }
4666
4667 x86_cpu_enable_xsave_components(cpu);
4668
4669 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
4670 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4671 if (cpu->full_cpuid_auto_level) {
4672 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4673 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4674 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4675 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4676 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4677 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4678 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4679 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4680 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4681 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4682 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4683 /* SVM requires CPUID[0x8000000A] */
4684 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4685 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4686 }
4687
4688 /* SEV requires CPUID[0x8000001F] */
4689 if (sev_enabled()) {
4690 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4691 }
4692 }
4693
4694 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4695 if (env->cpuid_level == UINT32_MAX) {
4696 env->cpuid_level = env->cpuid_min_level;
4697 }
4698 if (env->cpuid_xlevel == UINT32_MAX) {
4699 env->cpuid_xlevel = env->cpuid_min_xlevel;
4700 }
4701 if (env->cpuid_xlevel2 == UINT32_MAX) {
4702 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4703 }
4704
4705 out:
4706 if (local_err != NULL) {
4707 error_propagate(errp, local_err);
4708 }
4709 }
4710
4711 /*
4712 * Finishes initialization of CPUID data, filters CPU feature
4713 * words based on host availability of each feature.
4714 *
4715 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4716 */
4717 static int x86_cpu_filter_features(X86CPU *cpu)
4718 {
4719 CPUX86State *env = &cpu->env;
4720 FeatureWord w;
4721 int rv = 0;
4722
4723 for (w = 0; w < FEATURE_WORDS; w++) {
4724 uint32_t host_feat =
4725 x86_cpu_get_supported_feature_word(w, false);
4726 uint32_t requested_features = env->features[w];
4727 env->features[w] &= host_feat;
4728 cpu->filtered_features[w] = requested_features & ~env->features[w];
4729 if (cpu->filtered_features[w]) {
4730 rv = 1;
4731 }
4732 }
4733
4734 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
4735 kvm_enabled()) {
4736 KVMState *s = CPU(cpu)->kvm_state;
4737 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
4738 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
4739 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
4740 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
4741 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
4742
4743 if (!eax_0 ||
4744 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
4745 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
4746 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
4747 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
4748 INTEL_PT_ADDR_RANGES_NUM) ||
4749 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
4750 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
4751 (ecx_0 & INTEL_PT_IP_LIP)) {
4752 /*
4753 * Processor Trace capabilities aren't configurable, so if the
4754 * host can't emulate the capabilities we report on
4755 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
4756 */
4757 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
4758 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
4759 rv = 1;
4760 }
4761 }
4762
4763 return rv;
4764 }
4765
4766 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4767 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4768 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4769 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4770 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4771 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
4772 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4773 {
4774 CPUState *cs = CPU(dev);
4775 X86CPU *cpu = X86_CPU(dev);
4776 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4777 CPUX86State *env = &cpu->env;
4778 Error *local_err = NULL;
4779 static bool ht_warned;
4780
4781 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4782 char *name = x86_cpu_class_get_model_name(xcc);
4783 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4784 g_free(name);
4785 goto out;
4786 }
4787
4788 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4789 error_setg(errp, "apic-id property was not initialized properly");
4790 return;
4791 }
4792
4793 x86_cpu_expand_features(cpu, &local_err);
4794 if (local_err) {
4795 goto out;
4796 }
4797
4798 if (x86_cpu_filter_features(cpu) &&
4799 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4800 x86_cpu_report_filtered_features(cpu);
4801 if (cpu->enforce_cpuid) {
4802 error_setg(&local_err,
4803 accel_uses_host_cpuid() ?
4804 "Host doesn't support requested features" :
4805 "TCG doesn't support requested features");
4806 goto out;
4807 }
4808 }
4809
4810 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4811 * CPUID[1].EDX.
4812 */
4813 if (IS_AMD_CPU(env)) {
4814 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4815 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4816 & CPUID_EXT2_AMD_ALIASES);
4817 }
4818
4819 /* For 64-bit systems, think about the number of physical bits to present.
4820 * Ideally this should be the same as the host; anything other than matching
4821 * the host can cause incorrect guest behaviour.
4822 * QEMU used to pick the magic value of 40 bits, which corresponds to
4823 * consumer AMD devices but nothing else.
4824 */
4825 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4826 if (accel_uses_host_cpuid()) {
4827 uint32_t host_phys_bits = x86_host_phys_bits();
4828 static bool warned;
4829
4830 if (cpu->host_phys_bits) {
4831 /* The user asked for us to use the host physical bits */
4832 cpu->phys_bits = host_phys_bits;
4833 }
4834
4835 /* Print a warning if the user set it to a value that's not the
4836 * host value.
4837 */
4838 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4839 !warned) {
4840 warn_report("Host physical bits (%u)"
4841 " does not match phys-bits property (%u)",
4842 host_phys_bits, cpu->phys_bits);
4843 warned = true;
4844 }
4845
4846 if (cpu->phys_bits &&
4847 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4848 cpu->phys_bits < 32)) {
4849 error_setg(errp, "phys-bits should be between 32 and %u "
4850 " (but is %u)",
4851 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
4852 return;
4853 }
4854 } else {
4855 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4856 error_setg(errp, "TCG only supports phys-bits=%u",
4857 TCG_PHYS_ADDR_BITS);
4858 return;
4859 }
4860 }
4861 /* 0 means it was not explicitly set by the user (or by machine
4862 * compat_props or by the host code above). In this case, the default
4863 * is the value used by TCG (40).
4864 */
4865 if (cpu->phys_bits == 0) {
4866 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4867 }
4868 } else {
4869 /* For 32 bit systems don't use the user set value, but keep
4870 * phys_bits consistent with what we tell the guest.
4871 */
4872 if (cpu->phys_bits != 0) {
4873 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
4874 return;
4875 }
4876
4877 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4878 cpu->phys_bits = 36;
4879 } else {
4880 cpu->phys_bits = 32;
4881 }
4882 }
4883
4884 /* Cache information initialization */
4885 if (!cpu->legacy_cache) {
4886 if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
4887 char *name = x86_cpu_class_get_model_name(xcc);
4888 error_setg(errp,
4889 "CPU model '%s' doesn't support legacy-cache=off", name);
4890 g_free(name);
4891 return;
4892 }
4893 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
4894 *xcc->cpu_def->cache_info;
4895 } else {
4896 /* Build legacy cache information */
4897 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
4898 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
4899 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
4900 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
4901
4902 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
4903 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
4904 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
4905 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
4906
4907 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
4908 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
4909 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
4910 env->cache_info_amd.l3_cache = &legacy_l3_cache;
4911 }
4912
4913
4914 cpu_exec_realizefn(cs, &local_err);
4915 if (local_err != NULL) {
4916 error_propagate(errp, local_err);
4917 return;
4918 }
4919
4920 #ifndef CONFIG_USER_ONLY
4921 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
4922
4923 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4924 x86_cpu_apic_create(cpu, &local_err);
4925 if (local_err != NULL) {
4926 goto out;
4927 }
4928 }
4929 #endif
4930
4931 mce_init(cpu);
4932
4933 #ifndef CONFIG_USER_ONLY
4934 if (tcg_enabled()) {
4935 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4936 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4937
4938 /* Outer container... */
4939 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4940 memory_region_set_enabled(cpu->cpu_as_root, true);
4941
4942 /* ... with two regions inside: normal system memory with low
4943 * priority, and...
4944 */
4945 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4946 get_system_memory(), 0, ~0ull);
4947 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4948 memory_region_set_enabled(cpu->cpu_as_mem, true);
4949
4950 cs->num_ases = 2;
4951 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4952 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4953
4954 /* ... SMRAM with higher priority, linked from /machine/smram. */
4955 cpu->machine_done.notify = x86_cpu_machine_done;
4956 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4957 }
4958 #endif
4959
4960 qemu_init_vcpu(cs);
4961
4962 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
4963 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
4964 * based on inputs (sockets, cores, threads), it is still better to give
4965 * users a warning.
4966 *
4967 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
4968 * cs->nr_threads hasn't been populated yet and the check would be incorrect.
4969 */
4970 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
4971 error_report("AMD CPU doesn't support hyperthreading. Please configure"
4972 " -smp options properly.");
4973 ht_warned = true;
4974 }
4975
4976 x86_cpu_apic_realize(cpu, &local_err);
4977 if (local_err != NULL) {
4978 goto out;
4979 }
4980 cpu_reset(cs);
4981
4982 xcc->parent_realize(dev, &local_err);
4983
4984 out:
4985 if (local_err != NULL) {
4986 error_propagate(errp, local_err);
4987 return;
4988 }
4989 }
4990
4991 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
4992 {
4993 X86CPU *cpu = X86_CPU(dev);
4994 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4995 Error *local_err = NULL;
4996
4997 #ifndef CONFIG_USER_ONLY
4998 cpu_remove_sync(CPU(dev));
4999 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
5000 #endif
5001
5002 if (cpu->apic_state) {
5003 object_unparent(OBJECT(cpu->apic_state));
5004 cpu->apic_state = NULL;
5005 }
5006
5007 xcc->parent_unrealize(dev, &local_err);
5008 if (local_err != NULL) {
5009 error_propagate(errp, local_err);
5010 return;
5011 }
5012 }
5013
5014 typedef struct BitProperty {
5015 FeatureWord w;
5016 uint32_t mask;
5017 } BitProperty;
5018
5019 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5020 void *opaque, Error **errp)
5021 {
5022 X86CPU *cpu = X86_CPU(obj);
5023 BitProperty *fp = opaque;
5024 uint32_t f = cpu->env.features[fp->w];
5025 bool value = (f & fp->mask) == fp->mask;
5026 visit_type_bool(v, name, &value, errp);
5027 }
5028
5029 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5030 void *opaque, Error **errp)
5031 {
5032 DeviceState *dev = DEVICE(obj);
5033 X86CPU *cpu = X86_CPU(obj);
5034 BitProperty *fp = opaque;
5035 Error *local_err = NULL;
5036 bool value;
5037
5038 if (dev->realized) {
5039 qdev_prop_set_after_realize(dev, name, errp);
5040 return;
5041 }
5042
5043 visit_type_bool(v, name, &value, &local_err);
5044 if (local_err) {
5045 error_propagate(errp, local_err);
5046 return;
5047 }
5048
5049 if (value) {
5050 cpu->env.features[fp->w] |= fp->mask;
5051 } else {
5052 cpu->env.features[fp->w] &= ~fp->mask;
5053 }
5054 cpu->env.user_features[fp->w] |= fp->mask;
5055 }
5056
5057 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5058 void *opaque)
5059 {
5060 BitProperty *prop = opaque;
5061 g_free(prop);
5062 }
5063
5064 /* Register a boolean property to get/set a single bit in a uint32_t field.
5065 *
5066 * The same property name can be registered multiple times to make it affect
5067 * multiple bits in the same FeatureWord. In that case, the getter will return
5068 * true only if all bits are set.
5069 */
5070 static void x86_cpu_register_bit_prop(X86CPU *cpu,
5071 const char *prop_name,
5072 FeatureWord w,
5073 int bitnr)
5074 {
5075 BitProperty *fp;
5076 ObjectProperty *op;
5077 uint32_t mask = (1UL << bitnr);
5078
5079 op = object_property_find(OBJECT(cpu), prop_name, NULL);
5080 if (op) {
5081 fp = op->opaque;
5082 assert(fp->w == w);
5083 fp->mask |= mask;
5084 } else {
5085 fp = g_new0(BitProperty, 1);
5086 fp->w = w;
5087 fp->mask = mask;
5088 object_property_add(OBJECT(cpu), prop_name, "bool",
5089 x86_cpu_get_bit_prop,
5090 x86_cpu_set_bit_prop,
5091 x86_cpu_release_bit_prop, fp, &error_abort);
5092 }
5093 }
5094
5095 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5096 FeatureWord w,
5097 int bitnr)
5098 {
5099 FeatureWordInfo *fi = &feature_word_info[w];
5100 const char *name = fi->feat_names[bitnr];
5101
5102 if (!name) {
5103 return;
5104 }
5105
5106 /* Property names should use "-" instead of "_".
5107 * Old names containing underscores are registered as aliases
5108 * using object_property_add_alias()
5109 */
5110 assert(!strchr(name, '_'));
5111 /* aliases don't use "|" delimiters anymore; they are registered
5112 * manually using object_property_add_alias() */
5113 assert(!strchr(name, '|'));
5114 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5115 }
5116
5117 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5118 {
5119 X86CPU *cpu = X86_CPU(cs);
5120 CPUX86State *env = &cpu->env;
5121 GuestPanicInformation *panic_info = NULL;
5122
5123 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5124 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5125
5126 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
5127
5128 assert(HV_CRASH_PARAMS >= 5);
5129 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5130 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5131 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5132 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5133 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5134 }
5135
5136 return panic_info;
5137 }
5138 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5139 const char *name, void *opaque,
5140 Error **errp)
5141 {
5142 CPUState *cs = CPU(obj);
5143 GuestPanicInformation *panic_info;
5144
5145 if (!cs->crash_occurred) {
5146 error_setg(errp, "No crash occurred");
5147 return;
5148 }
5149
5150 panic_info = x86_cpu_get_crash_info(cs);
5151 if (panic_info == NULL) {
5152 error_setg(errp, "No crash information");
5153 return;
5154 }
5155
5156 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5157 errp);
5158 qapi_free_GuestPanicInformation(panic_info);
5159 }
5160
5161 static void x86_cpu_initfn(Object *obj)
5162 {
5163 CPUState *cs = CPU(obj);
5164 X86CPU *cpu = X86_CPU(obj);
5165 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
5166 CPUX86State *env = &cpu->env;
5167 FeatureWord w;
5168
5169 cs->env_ptr = env;
5170
5171 object_property_add(obj, "family", "int",
5172 x86_cpuid_version_get_family,
5173 x86_cpuid_version_set_family, NULL, NULL, NULL);
5174 object_property_add(obj, "model", "int",
5175 x86_cpuid_version_get_model,
5176 x86_cpuid_version_set_model, NULL, NULL, NULL);
5177 object_property_add(obj, "stepping", "int",
5178 x86_cpuid_version_get_stepping,
5179 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
5180 object_property_add_str(obj, "vendor",
5181 x86_cpuid_get_vendor,
5182 x86_cpuid_set_vendor, NULL);
5183 object_property_add_str(obj, "model-id",
5184 x86_cpuid_get_model_id,
5185 x86_cpuid_set_model_id, NULL);
5186 object_property_add(obj, "tsc-frequency", "int",
5187 x86_cpuid_get_tsc_freq,
5188 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
5189 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
5190 x86_cpu_get_feature_words,
5191 NULL, NULL, (void *)env->features, NULL);
5192 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
5193 x86_cpu_get_feature_words,
5194 NULL, NULL, (void *)cpu->filtered_features, NULL);
5195
5196 object_property_add(obj, "crash-information", "GuestPanicInformation",
5197 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
5198
5199 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
5200
5201 for (w = 0; w < FEATURE_WORDS; w++) {
5202 int bitnr;
5203
5204 for (bitnr = 0; bitnr < 32; bitnr++) {
5205 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
5206 }
5207 }
5208
5209 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
5210 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
5211 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
5212 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
5213 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
5214 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
5215 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
5216
5217 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
5218 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
5219 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
5220 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
5221 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
5222 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
5223 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
5224 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
5225 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
5226 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
5227 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
5228 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
5229 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
5230 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
5231 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
5232 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
5233 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
5234 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
5235 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
5236 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
5237 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
5238
5239 if (xcc->cpu_def) {
5240 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
5241 }
5242 }
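/*
 * Illustrative command lines (editor's addition): every feature bit
 * registered above becomes a boolean -cpu property, and the aliases keep
 * the older spellings working, so (assuming the rest of the command line
 * is valid) these two requests are equivalent:
 *
 *     qemu-system-x86_64 -cpu qemu64,sse4.1=on ...
 *     qemu-system-x86_64 -cpu qemu64,sse4_1=on ...
 */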
5243
5244 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5245 {
5246 X86CPU *cpu = X86_CPU(cs);
5247
5248 return cpu->apic_id;
5249 }
5250
5251 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5252 {
5253 X86CPU *cpu = X86_CPU(cs);
5254
5255 return cpu->env.cr[0] & CR0_PG_MASK;
5256 }
5257
5258 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5259 {
5260 X86CPU *cpu = X86_CPU(cs);
5261
5262 cpu->env.eip = value;
5263 }
5264
5265 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5266 {
5267 X86CPU *cpu = X86_CPU(cs);
5268
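/*
 * Editor's note: tb->pc is the linear address of the translated code
 * (CS base + EIP), so EIP is recovered by subtracting the CS base
 * recorded in the TB.
 */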
5269 cpu->env.eip = tb->pc - tb->cs_base;
5270 }
5271
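/*
 * Editor's summary of the checks below: external (HARD/POLL) interrupts
 * only make the CPU runnable when EFLAGS.IF is set; NMI, INIT, SIPI and
 * MCE make it runnable unconditionally; SMI does so only while the CPU
 * is not already in SMM.
 */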
5272 static bool x86_cpu_has_work(CPUState *cs)
5273 {
5274 X86CPU *cpu = X86_CPU(cs);
5275 CPUX86State *env = &cpu->env;
5276
5277 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
5278 CPU_INTERRUPT_POLL)) &&
5279 (env->eflags & IF_MASK)) ||
5280 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
5281 CPU_INTERRUPT_INIT |
5282 CPU_INTERRUPT_SIPI |
5283 CPU_INTERRUPT_MCE)) ||
5284 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
5285 !(env->hflags & HF_SMM_MASK));
5286 }
5287
5288 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5289 {
5290 X86CPU *cpu = X86_CPU(cs);
5291 CPUX86State *env = &cpu->env;
5292
5293 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5294 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5295 : bfd_mach_i386_i8086);
5296 info->print_insn = print_insn_i386;
5297
5298 info->cap_arch = CS_ARCH_X86;
5299 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5300 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5301 : CS_MODE_16);
5302 info->cap_insn_unit = 1;
5303 info->cap_insn_split = 8;
5304 }
5305
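/*
 * Editor's summary: x86_update_hflags() recomputes the hflags bits that
 * HFLAG_COPY_MASK clears (CPL, PE/MP/EM/TS, TF, VM, IOPL, OSFXSR, LMA,
 * CS32/SS32/CS64, ADDSEG) from the current CR0/CR4/EFER, EFLAGS and
 * segment descriptor state, while preserving every other hflags bit.
 * Typical callers are accelerator back ends that have just loaded
 * register state from the hypervisor (an assumption about usage, not
 * stated here).
 */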
5306 void x86_update_hflags(CPUX86State *env)
5307 {
5308 uint32_t hflags;
5309 #define HFLAG_COPY_MASK \
5310 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
5311 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
5312 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
5313 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
5314
5315 hflags = env->hflags & HFLAG_COPY_MASK;
5316 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
5317 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
5318 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
5319 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
5320 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
5321
5322 if (env->cr[4] & CR4_OSFXSR_MASK) {
5323 hflags |= HF_OSFXSR_MASK;
5324 }
5325
5326 if (env->efer & MSR_EFER_LMA) {
5327 hflags |= HF_LMA_MASK;
5328 }
5329
5330 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
5331 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
5332 } else {
5333 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
5334 (DESC_B_SHIFT - HF_CS32_SHIFT);
5335 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
5336 (DESC_B_SHIFT - HF_SS32_SHIFT);
5337 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
5338 !(hflags & HF_CS32_MASK)) {
5339 hflags |= HF_ADDSEG_MASK;
5340 } else {
5341 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
5342 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
5343 }
5344 }
5345 env->hflags = hflags;
5346 }
5347
5348 static Property x86_cpu_properties[] = {
5349 #ifdef CONFIG_USER_ONLY
5350 /* apic_id = 0 by default for *-user, see commit 9886e834 */
5351 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
5352 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
5353 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
5354 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
5355 #else
5356 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
5357 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
5358 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
5359 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
5360 #endif
5361 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
5362 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
5363 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
5364 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
5365 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
5366 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
5367 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
5368 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
5369 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
5370 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
5371 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
5372 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
5373 DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
5374 DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
5375 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
5376 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
5377 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
5378 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
5379 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
5380 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
5381 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
5382 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
5383 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
5384 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
5385 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
5386 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
5387 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
5388 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
5389 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
5390 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
5391 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
5392 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
5393 false),
5394 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
5395 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
5396 /*
5397 * legacy_cache defaults to true unless the CPU model provides its
5398 * own cache information (see x86_cpu_load_def()).
5399 */
5400 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
5401
5402 /*
5403 * From "Requirements for Implementing the Microsoft
5404 * Hypervisor Interface":
5405 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
5406 *
5407 * "Starting with Windows Server 2012 and Windows 8, if
5408 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
5409 * the hypervisor imposes no specific limit to the number of VPs.
5410 * In this case, Windows Server 2012 guest VMs may use more than
5411 * 64 VPs, up to the maximum supported number of processors applicable
5412 * to the specific Windows version being used."
5413 */
5414 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
5415 DEFINE_PROP_END_OF_LIST()
5416 };
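/*
 * Illustrative command line (editor's addition): the qdev properties
 * above are set through -cpu, e.g. enabling a few Hyper-V enlightenments
 * and capping the CPUID level (assuming a KVM host that supports them):
 *
 *     qemu-system-x86_64 -accel kvm \
 *         -cpu Skylake-Client,hv-relaxed=on,hv-vapic=on,hv-spinlocks=0x1fff,level=0xd
 */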
5417
5418 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
5419 {
5420 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5421 CPUClass *cc = CPU_CLASS(oc);
5422 DeviceClass *dc = DEVICE_CLASS(oc);
5423
5424 device_class_set_parent_realize(dc, x86_cpu_realizefn,
5425 &xcc->parent_realize);
5426 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
5427 &xcc->parent_unrealize);
5428 dc->props = x86_cpu_properties;
5429
5430 xcc->parent_reset = cc->reset;
5431 cc->reset = x86_cpu_reset;
5432 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
5433
5434 cc->class_by_name = x86_cpu_class_by_name;
5435 cc->parse_features = x86_cpu_parse_featurestr;
5436 cc->has_work = x86_cpu_has_work;
5437 #ifdef CONFIG_TCG
5438 cc->do_interrupt = x86_cpu_do_interrupt;
5439 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
5440 #endif
5441 cc->dump_state = x86_cpu_dump_state;
5442 cc->get_crash_info = x86_cpu_get_crash_info;
5443 cc->set_pc = x86_cpu_set_pc;
5444 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
5445 cc->gdb_read_register = x86_cpu_gdb_read_register;
5446 cc->gdb_write_register = x86_cpu_gdb_write_register;
5447 cc->get_arch_id = x86_cpu_get_arch_id;
5448 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
5449 #ifdef CONFIG_USER_ONLY
5450 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
5451 #else
5452 cc->asidx_from_attrs = x86_asidx_from_attrs;
5453 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
5454 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
5455 cc->write_elf64_note = x86_cpu_write_elf64_note;
5456 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
5457 cc->write_elf32_note = x86_cpu_write_elf32_note;
5458 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
5459 cc->vmsd = &vmstate_x86_cpu;
5460 #endif
5461 cc->gdb_arch_name = x86_gdb_arch_name;
5462 #ifdef TARGET_X86_64
5463 cc->gdb_core_xml_file = "i386-64bit.xml";
5464 cc->gdb_num_core_regs = 57;
5465 #else
5466 cc->gdb_core_xml_file = "i386-32bit.xml";
5467 cc->gdb_num_core_regs = 41;
5468 #endif
5469 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
5470 cc->debug_excp_handler = breakpoint_handler;
5471 #endif
5472 cc->cpu_exec_enter = x86_cpu_exec_enter;
5473 cc->cpu_exec_exit = x86_cpu_exec_exit;
5474 #ifdef CONFIG_TCG
5475 cc->tcg_initialize = tcg_x86_init;
5476 #endif
5477 cc->disas_set_info = x86_disas_set_info;
5478
5479 dc->user_creatable = true;
5480 }
5481
5482 static const TypeInfo x86_cpu_type_info = {
5483 .name = TYPE_X86_CPU,
5484 .parent = TYPE_CPU,
5485 .instance_size = sizeof(X86CPU),
5486 .instance_init = x86_cpu_initfn,
5487 .abstract = true,
5488 .class_size = sizeof(X86CPUClass),
5489 .class_init = x86_cpu_common_class_init,
5490 };
5491
5492
5493 /* "base" CPU model, used by query-cpu-model-expansion */
5494 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
5495 {
5496 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5497
5498 xcc->static_model = true;
5499 xcc->migration_safe = true;
5500 xcc->model_description = "base CPU model type with no features enabled";
5501 xcc->ordering = 8;
5502 }
5503
5504 static const TypeInfo x86_base_cpu_type_info = {
5505 .name = X86_CPU_TYPE_NAME("base"),
5506 .parent = TYPE_X86_CPU,
5507 .class_init = x86_cpu_base_class_init,
5508 };
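/*
 * Rough QMP sketch (editor's addition): static expansion reports CPU
 * models in terms of the "base" type registered above, e.g.:
 *
 *     { "execute": "query-cpu-model-expansion",
 *       "arguments": { "type": "static", "model": { "name": "max" } } }
 *
 * The exact property list in the reply depends on the host and
 * accelerator, so the arguments here are only an example.
 */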
5509
5510 static void x86_cpu_register_types(void)
5511 {
5512 int i;
5513
5514 type_register_static(&x86_cpu_type_info);
5515 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5516 x86_register_cpudef_type(&builtin_x86_defs[i]);
5517 }
5518 type_register_static(&max_x86_cpu_type_info);
5519 type_register_static(&x86_base_cpu_type_info);
5520 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5521 type_register_static(&host_x86_cpu_type_info);
5522 #endif
5523 }
5524
5525 type_init(x86_cpu_register_types)