target/i386/cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/units.h"
22 #include "qemu/cutils.h"
23 #include "qemu/bitops.h"
24 #include "qemu/qemu-print.h"
25
26 #include "cpu.h"
27 #include "exec/exec-all.h"
28 #include "sysemu/kvm.h"
29 #include "sysemu/hvf.h"
30 #include "sysemu/cpus.h"
31 #include "kvm_i386.h"
32 #include "sev_i386.h"
33
34 #include "qemu/error-report.h"
35 #include "qemu/module.h"
36 #include "qemu/option.h"
37 #include "qemu/config-file.h"
38 #include "qapi/error.h"
39 #include "qapi/qapi-visit-machine.h"
40 #include "qapi/qapi-visit-run-state.h"
41 #include "qapi/qmp/qdict.h"
42 #include "qapi/qmp/qerror.h"
43 #include "qapi/visitor.h"
44 #include "qom/qom-qobject.h"
45 #include "sysemu/arch_init.h"
46 #include "qapi/qapi-commands-machine-target.h"
47
48 #include "standard-headers/asm-x86/kvm_para.h"
49
50 #include "sysemu/sysemu.h"
51 #include "sysemu/tcg.h"
52 #include "hw/qdev-properties.h"
53 #include "hw/i386/topology.h"
54 #ifndef CONFIG_USER_ONLY
55 #include "exec/address-spaces.h"
56 #include "hw/hw.h"
57 #include "hw/xen/xen.h"
58 #include "hw/i386/apic_internal.h"
59 #include "hw/boards.h"
60 #endif
61
62 #include "disas/capstone.h"
63
64 /* Helpers for building CPUID[2] descriptors: */
65
66 struct CPUID2CacheDescriptorInfo {
67 enum CacheType type;
68 int level;
69 int size;
70 int line_size;
71 int associativity;
72 };
73
74 /*
75 * Known CPUID 2 cache descriptors.
76 * From Intel SDM Volume 2A, CPUID instruction
77 */
78 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
79 [0x06] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 8 * KiB,
80 .associativity = 4, .line_size = 32, },
81 [0x08] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 16 * KiB,
82 .associativity = 4, .line_size = 32, },
83 [0x09] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
84 .associativity = 4, .line_size = 64, },
85 [0x0A] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
86 .associativity = 2, .line_size = 32, },
87 [0x0C] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
88 .associativity = 4, .line_size = 32, },
89 [0x0D] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
90 .associativity = 4, .line_size = 64, },
91 [0x0E] = { .level = 1, .type = DATA_CACHE, .size = 24 * KiB,
92 .associativity = 6, .line_size = 64, },
93 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
94 .associativity = 2, .line_size = 64, },
95 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
96 .associativity = 8, .line_size = 64, },
97 /* lines per sector is not supported by cpuid2_cache_descriptor(),
98 * so descriptors 0x22, 0x23 are not included
99 */
100 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
101 .associativity = 16, .line_size = 64, },
102 /* lines per sector is not supported by cpuid2_cache_descriptor(),
103 * so descriptors 0x25, 0x20 are not included
104 */
105 [0x2C] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
106 .associativity = 8, .line_size = 64, },
107 [0x30] = { .level = 1, .type = INSTRUCTION_CACHE, .size = 32 * KiB,
108 .associativity = 8, .line_size = 64, },
109 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
112 .associativity = 4, .line_size = 32, },
113 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
114 .associativity = 4, .line_size = 32, },
115 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
116 .associativity = 4, .line_size = 32, },
117 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
118 .associativity = 4, .line_size = 32, },
119 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
120 .associativity = 4, .line_size = 64, },
121 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
122 .associativity = 8, .line_size = 64, },
123 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
124 .associativity = 12, .line_size = 64, },
125 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
126 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
131 .associativity = 12, .line_size = 64, },
132 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
133 .associativity = 16, .line_size = 64, },
134 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
135 .associativity = 24, .line_size = 64, },
136 [0x60] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
137 .associativity = 8, .line_size = 64, },
138 [0x66] = { .level = 1, .type = DATA_CACHE, .size = 8 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x67] = { .level = 1, .type = DATA_CACHE, .size = 16 * KiB,
141 .associativity = 4, .line_size = 64, },
142 [0x68] = { .level = 1, .type = DATA_CACHE, .size = 32 * KiB,
143 .associativity = 4, .line_size = 64, },
144 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
145 .associativity = 4, .line_size = 64, },
146 /* lines per sector is not supported by cpuid2_cache_descriptor(),
147 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
148 */
149 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
150 .associativity = 8, .line_size = 64, },
151 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
152 .associativity = 2, .line_size = 64, },
153 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 64, },
155 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
156 .associativity = 8, .line_size = 32, },
157 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
158 .associativity = 8, .line_size = 32, },
159 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
160 .associativity = 8, .line_size = 32, },
161 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
162 .associativity = 8, .line_size = 32, },
163 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 8, .line_size = 64, },
167 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 4, .line_size = 64, },
171 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 4, .line_size = 64, },
173 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
176 .associativity = 8, .line_size = 64, },
177 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
178 .associativity = 8, .line_size = 64, },
179 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
182 .associativity = 12, .line_size = 64, },
183 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
184 .associativity = 12, .line_size = 64, },
185 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
188 .associativity = 16, .line_size = 64, },
189 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
190 .associativity = 16, .line_size = 64, },
191 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
192 .associativity = 24, .line_size = 64, },
193 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
194 .associativity = 24, .line_size = 64, },
195 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
196 .associativity = 24, .line_size = 64, },
197 };
198
199 /*
200 * "CPUID leaf 2 does not report cache descriptor information,
201 * use CPUID leaf 4 to query cache parameters"
202 */
203 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
204
205 /*
206 * Return a CPUID 2 cache descriptor for a given cache.
207 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
208 */
209 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
210 {
211 int i;
212
213 assert(cache->size > 0);
214 assert(cache->level > 0);
215 assert(cache->line_size > 0);
216 assert(cache->associativity > 0);
217 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
218 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
219 if (d->level == cache->level && d->type == cache->type &&
220 d->size == cache->size && d->line_size == cache->line_size &&
221 d->associativity == cache->associativity) {
222 return i;
223 }
224 }
225
226 return CACHE_DESCRIPTOR_UNAVAILABLE;
227 }
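
/*
 * Illustrative example (an addition, not upstream code): the legacy_l1d_cache
 * definition further below (L1 data cache, 32 KiB, 8-way, 64-byte lines)
 * matches the table entry at index 0x2C, so a lookup like
 *
 *     uint8_t d = cpuid2_cache_descriptor(&legacy_l1d_cache);   // d == 0x2C
 *
 * succeeds, while a hypothetical cache with no matching entry (say a 48 KiB,
 * 12-way L1d) would return CACHE_DESCRIPTOR_UNAVAILABLE (0xFF) instead.
 */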
228
229 /* CPUID Leaf 4 constants: */
230
231 /* EAX: */
232 #define CACHE_TYPE_D 1
233 #define CACHE_TYPE_I 2
234 #define CACHE_TYPE_UNIFIED 3
235
236 #define CACHE_LEVEL(l) (l << 5)
237
238 #define CACHE_SELF_INIT_LEVEL (1 << 8)
239
240 /* EDX: */
241 #define CACHE_NO_INVD_SHARING (1 << 0)
242 #define CACHE_INCLUSIVE (1 << 1)
243 #define CACHE_COMPLEX_IDX (1 << 2)
244
245 /* Encode CacheType for CPUID[4].EAX */
246 #define CACHE_TYPE(t) (((t) == DATA_CACHE) ? CACHE_TYPE_D : \
247 ((t) == INSTRUCTION_CACHE) ? CACHE_TYPE_I : \
248 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
249 0 /* Invalid value */)
250
251
252 /* Encode cache info for CPUID[4] */
253 static void encode_cache_cpuid4(CPUCacheInfo *cache,
254 int num_apic_ids, int num_cores,
255 uint32_t *eax, uint32_t *ebx,
256 uint32_t *ecx, uint32_t *edx)
257 {
258 assert(cache->size == cache->line_size * cache->associativity *
259 cache->partitions * cache->sets);
260
261 assert(num_apic_ids > 0);
262 *eax = CACHE_TYPE(cache->type) |
263 CACHE_LEVEL(cache->level) |
264 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
265 ((num_cores - 1) << 26) |
266 ((num_apic_ids - 1) << 14);
267
268 assert(cache->line_size > 0);
269 assert(cache->partitions > 0);
270 assert(cache->associativity > 0);
271 /* We don't implement fully-associative caches */
272 assert(cache->associativity < cache->sets);
273 *ebx = (cache->line_size - 1) |
274 ((cache->partitions - 1) << 12) |
275 ((cache->associativity - 1) << 22);
276
277 assert(cache->sets > 0);
278 *ecx = cache->sets - 1;
279
280 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
281 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
282 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
283 }
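
/*
 * Illustrative worked example (an addition, not upstream code): encoding the
 * legacy_l2_cache defined below (unified, level 2, 4 MiB, 16-way, 64-byte
 * lines, 1 partition, 4096 sets) for an assumed minimal topology with
 * num_apic_ids = 1 and num_cores = 1:
 *
 *     uint32_t eax, ebx, ecx, edx;
 *     encode_cache_cpuid4(&legacy_l2_cache, 1, 1, &eax, &ebx, &ecx, &edx);
 *     // eax == 0x00000143  (unified | level 2 | self-initializing)
 *     // ebx == 0x03C0003F  ((16-1) << 22 | (1-1) << 12 | (64-1))
 *     // ecx == 0x00000FFF  (4096 sets - 1)
 *     // edx == 0x00000001  (no_invd_sharing)
 */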
284
285 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
286 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
287 {
288 assert(cache->size % 1024 == 0);
289 assert(cache->lines_per_tag > 0);
290 assert(cache->associativity > 0);
291 assert(cache->line_size > 0);
292 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
293 (cache->lines_per_tag << 8) | (cache->line_size);
294 }
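
/*
 * Illustrative worked example (an addition, not upstream code): for the
 * legacy_l1d_cache_amd definition below (64 KiB, 2-way, 1 line per tag,
 * 64-byte lines) the encoding is
 *
 *     encode_cache_cpuid80000005(&legacy_l1d_cache_amd)
 *         == (64 << 24) | (2 << 16) | (1 << 8) | 64
 *         == 0x40020140
 */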
295
296 #define ASSOC_FULL 0xFF
297
298 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
299 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
300 a == 2 ? 0x2 : \
301 a == 4 ? 0x4 : \
302 a == 8 ? 0x6 : \
303 a == 16 ? 0x8 : \
304 a == 32 ? 0xA : \
305 a == 48 ? 0xB : \
306 a == 64 ? 0xC : \
307 a == 96 ? 0xD : \
308 a == 128 ? 0xE : \
309 a == ASSOC_FULL ? 0xF : \
310 0 /* invalid value */)
311
312 /*
313 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
314 * @l3 can be NULL.
315 */
316 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
317 CPUCacheInfo *l3,
318 uint32_t *ecx, uint32_t *edx)
319 {
320 assert(l2->size % 1024 == 0);
321 assert(l2->associativity > 0);
322 assert(l2->lines_per_tag > 0);
323 assert(l2->line_size > 0);
324 *ecx = ((l2->size / 1024) << 16) |
325 (AMD_ENC_ASSOC(l2->associativity) << 12) |
326 (l2->lines_per_tag << 8) | (l2->line_size);
327
328 if (l3) {
329 assert(l3->size % (512 * 1024) == 0);
330 assert(l3->associativity > 0);
331 assert(l3->lines_per_tag > 0);
332 assert(l3->line_size > 0);
333 *edx = ((l3->size / (512 * 1024)) << 18) |
334 (AMD_ENC_ASSOC(l3->associativity) << 12) |
335 (l3->lines_per_tag << 8) | (l3->line_size);
336 } else {
337 *edx = 0;
338 }
339 }
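
/*
 * Illustrative worked example (an addition, not upstream code): with the AMD
 * legacy definitions below, legacy_l2_cache_amd (512 KiB, 16-way, 1 line per
 * tag, 64-byte lines) and legacy_l3_cache (16 MiB, 16-way, 1 line per tag,
 * 64-byte lines), the encoding works out to
 *
 *     ecx == (512 << 16) | (AMD_ENC_ASSOC(16) << 12) | (1 << 8) | 64
 *         == 0x02008140
 *     edx == ((16 * MiB / (512 * KiB)) << 18) | (AMD_ENC_ASSOC(16) << 12) |
 *            (1 << 8) | 64
 *         == 0x00808140
 */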
340
341 /*
342 * Definitions used for building CPUID Leaves 0x8000001D and 0x8000001E.
343 * Please refer to the AMD64 Architecture Programmer’s Manual Volume 3.
344 * Define the constants to build the cpu topology. Right now, the TOPOEXT
345 * feature is enabled only on EPYC, so these constants are based on
346 * EPYC-supported configurations. We may need to handle the cases where
347 * these values change in the future.
348 */
349 /* Maximum core complexes in a node */
350 #define MAX_CCX 2
351 /* Maximum cores in a core complex */
352 #define MAX_CORES_IN_CCX 4
353 /* Maximum cores in a node */
354 #define MAX_CORES_IN_NODE 8
355 /* Maximum nodes in a socket */
356 #define MAX_NODES_PER_SOCKET 4
357
358 /*
359 * Figure out the number of nodes required to build this config.
360 * The maximum number of cores in a node is 8 (MAX_CORES_IN_NODE).
361 */
362 static int nodes_in_socket(int nr_cores)
363 {
364 int nodes;
365
366 nodes = DIV_ROUND_UP(nr_cores, MAX_CORES_IN_NODE);
367
368 /* Hardware does not support config with 3 nodes, return 4 in that case */
369 return (nodes == 3) ? 4 : nodes;
370 }
371
372 /*
373 * Decide the number of cores in a core complex for the given nr_cores, using
374 * the constants MAX_CCX, MAX_CORES_IN_CCX, MAX_CORES_IN_NODE and
375 * MAX_NODES_PER_SOCKET, maintaining symmetry as much as possible.
376 * The L3 cache is shared across all cores in a core complex, so this also
377 * tells us how many cores are sharing the L3 cache.
378 */
379 static int cores_in_core_complex(int nr_cores)
380 {
381 int nodes;
382
383 /* Check if we can fit all the cores in one core complex */
384 if (nr_cores <= MAX_CORES_IN_CCX) {
385 return nr_cores;
386 }
387 /* Get the number of nodes required to build this config */
388 nodes = nodes_in_socket(nr_cores);
389
390 /*
391 * Divide the cores across all the core complexes.
392 * Return the rounded-up value.
393 */
394 return DIV_ROUND_UP(nr_cores, nodes * MAX_CCX);
395 }
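
/*
 * Illustrative examples (an addition, not upstream code), for hypothetical
 * guest core counts:
 *
 *     nr_cores =  4:  fits in one CCX         -> 4 cores per CCX
 *     nr_cores = 16:  nodes_in_socket() == 2  -> DIV_ROUND_UP(16, 4) == 4
 *     nr_cores = 24:  nodes_in_socket() == 4  -> DIV_ROUND_UP(24, 8) == 3
 */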
396
397 /* Encode cache info for CPUID[8000001D] */
398 static void encode_cache_cpuid8000001d(CPUCacheInfo *cache, CPUState *cs,
399 uint32_t *eax, uint32_t *ebx,
400 uint32_t *ecx, uint32_t *edx)
401 {
402 uint32_t l3_cores;
403 assert(cache->size == cache->line_size * cache->associativity *
404 cache->partitions * cache->sets);
405
406 *eax = CACHE_TYPE(cache->type) | CACHE_LEVEL(cache->level) |
407 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0);
408
409 /* L3 is shared among multiple cores */
410 if (cache->level == 3) {
411 l3_cores = cores_in_core_complex(cs->nr_cores);
412 *eax |= ((l3_cores * cs->nr_threads) - 1) << 14;
413 } else {
414 *eax |= ((cs->nr_threads - 1) << 14);
415 }
416
417 assert(cache->line_size > 0);
418 assert(cache->partitions > 0);
419 assert(cache->associativity > 0);
420 /* We don't implement fully-associative caches */
421 assert(cache->associativity < cache->sets);
422 *ebx = (cache->line_size - 1) |
423 ((cache->partitions - 1) << 12) |
424 ((cache->associativity - 1) << 22);
425
426 assert(cache->sets > 0);
427 *ecx = cache->sets - 1;
428
429 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
430 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
431 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
432 }
433
434 /* Data structure to hold the configuration info for a given core index */
435 struct core_topology {
436 /* core complex id of the current core index */
437 int ccx_id;
438 /*
439 * Adjusted core index for this core in the topology
440 * This can be 0,1,2,3 with max 4 cores in a core complex
441 */
442 int core_id;
443 /* Node id for this core index */
444 int node_id;
445 /* Number of nodes in this config */
446 int num_nodes;
447 };
448
449 /*
450 * Build a configuration that closely matches the EPYC hardware, using the
451 * EPYC hardware configuration values (MAX_CCX, MAX_CORES_IN_CCX,
452 * MAX_CORES_IN_NODE) for now. This could change in the future.
453 * nr_cores : Total number of cores in the config
454 * core_id : Core index of the current CPU
455 * topo : Data structure to hold all the config info for this core index
456 */
457 static void build_core_topology(int nr_cores, int core_id,
458 struct core_topology *topo)
459 {
460 int nodes, cores_in_ccx;
461
462 /* First get the number of nodes required */
463 nodes = nodes_in_socket(nr_cores);
464
465 cores_in_ccx = cores_in_core_complex(nr_cores);
466
467 topo->node_id = core_id / (cores_in_ccx * MAX_CCX);
468 topo->ccx_id = (core_id % (cores_in_ccx * MAX_CCX)) / cores_in_ccx;
469 topo->core_id = core_id % cores_in_ccx;
470 topo->num_nodes = nodes;
471 }
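
/*
 * Illustrative worked example (an addition, not upstream code): for a
 * hypothetical guest with nr_cores = 16 and core_id = 10, nodes_in_socket()
 * gives 2 and cores_in_core_complex() gives 4, so build_core_topology()
 * fills in:
 *
 *     topo.node_id   = 10 / (4 * 2)       == 1
 *     topo.ccx_id    = (10 % (4 * 2)) / 4 == 0
 *     topo.core_id   = 10 % 4             == 2
 *     topo.num_nodes = 2
 */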
472
473 /* Encode cache info for CPUID[8000001E] */
474 static void encode_topo_cpuid8000001e(CPUState *cs, X86CPU *cpu,
475 uint32_t *eax, uint32_t *ebx,
476 uint32_t *ecx, uint32_t *edx)
477 {
478 struct core_topology topo = {0};
479 unsigned long nodes;
480 int shift;
481
482 build_core_topology(cs->nr_cores, cpu->core_id, &topo);
483 *eax = cpu->apic_id;
484 /*
485 * CPUID_Fn8000001E_EBX
486 * 31:16 Reserved
487 * 15:8 Threads per core (The number of threads per core is
488 * Threads per core + 1)
489 * 7:0 Core id (see bit decoding below)
490 * SMT:
491 * 4:3 node id
492 * 2 Core complex id
493 * 1:0 Core id
494 * Non SMT:
495 * 5:4 node id
496 * 3 Core complex id
497 * 1:0 Core id
498 */
499 if (cs->nr_threads - 1) {
500 *ebx = ((cs->nr_threads - 1) << 8) | (topo.node_id << 3) |
501 (topo.ccx_id << 2) | topo.core_id;
502 } else {
503 *ebx = (topo.node_id << 4) | (topo.ccx_id << 3) | topo.core_id;
504 }
505 /*
506 * CPUID_Fn8000001E_ECX
507 * 31:11 Reserved
508 * 10:8 Nodes per processor (Nodes per processor is number of nodes + 1)
509 * 7:0 Node id (see bit decoding below)
510 * 2 Socket id
511 * 1:0 Node id
512 */
513 if (topo.num_nodes <= 4) {
514 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << 2) |
515 topo.node_id;
516 } else {
517 /*
518 * Node id fix-up. Actual hardware supports up to 4 nodes, but with more
519 * than 32 cores we may end up with more than 4 nodes. The node id is a
520 * combination of socket id and node id; the only requirement here is
521 * that this number be unique across the system. Shift the socket id to
522 * accommodate more nodes. We don't expect both the socket id and the
523 * node id to be big numbers at the same time; this is not an ideal
524 * config, but we need to support it. The maximum number of nodes is 32
525 * (255/8), with 8 cores per node and 255 cores maximum, so we only need
526 * 5 bits for nodes. Find the leftmost set bit to represent the total
527 * number of nodes; find_last_bit() returns the last set bit (0-based).
528 * Left-shift (+1) the socket id to make room for all the nodes.
529 */
530 nodes = topo.num_nodes - 1;
531 shift = find_last_bit(&nodes, 8);
532 *ecx = ((topo.num_nodes - 1) << 8) | (cpu->socket_id << (shift + 1)) |
533 topo.node_id;
534 }
535 *edx = 0;
536 }
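
/*
 * Illustrative worked example (an addition, not upstream code): continuing
 * the hypothetical 16-core guest above (core_id = 10 -> node_id 1, ccx_id 0,
 * core_id 2, num_nodes 2), and assuming 2 threads per core and socket_id 0:
 *
 *     ebx == ((2 - 1) << 8) | (1 << 3) | (0 << 2) | 2 == 0x0000010A
 *     ecx == ((2 - 1) << 8) | (0 << 2) | 1            == 0x00000101
 *
 * eax is simply the APIC ID of the vCPU and edx is 0.
 */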
537
538 /*
539 * Definitions of the hardcoded cache entries we expose:
540 * These are legacy cache values. If there is a need to change any
541 * of these values please use builtin_x86_defs
542 */
543
544 /* L1 data cache: */
545 static CPUCacheInfo legacy_l1d_cache = {
546 .type = DATA_CACHE,
547 .level = 1,
548 .size = 32 * KiB,
549 .self_init = 1,
550 .line_size = 64,
551 .associativity = 8,
552 .sets = 64,
553 .partitions = 1,
554 .no_invd_sharing = true,
555 };
556
557 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
558 static CPUCacheInfo legacy_l1d_cache_amd = {
559 .type = DATA_CACHE,
560 .level = 1,
561 .size = 64 * KiB,
562 .self_init = 1,
563 .line_size = 64,
564 .associativity = 2,
565 .sets = 512,
566 .partitions = 1,
567 .lines_per_tag = 1,
568 .no_invd_sharing = true,
569 };
570
571 /* L1 instruction cache: */
572 static CPUCacheInfo legacy_l1i_cache = {
573 .type = INSTRUCTION_CACHE,
574 .level = 1,
575 .size = 32 * KiB,
576 .self_init = 1,
577 .line_size = 64,
578 .associativity = 8,
579 .sets = 64,
580 .partitions = 1,
581 .no_invd_sharing = true,
582 };
583
584 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
585 static CPUCacheInfo legacy_l1i_cache_amd = {
586 .type = INSTRUCTION_CACHE,
587 .level = 1,
588 .size = 64 * KiB,
589 .self_init = 1,
590 .line_size = 64,
591 .associativity = 2,
592 .sets = 512,
593 .partitions = 1,
594 .lines_per_tag = 1,
595 .no_invd_sharing = true,
596 };
597
598 /* Level 2 unified cache: */
599 static CPUCacheInfo legacy_l2_cache = {
600 .type = UNIFIED_CACHE,
601 .level = 2,
602 .size = 4 * MiB,
603 .self_init = 1,
604 .line_size = 64,
605 .associativity = 16,
606 .sets = 4096,
607 .partitions = 1,
608 .no_invd_sharing = true,
609 };
610
611 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
612 static CPUCacheInfo legacy_l2_cache_cpuid2 = {
613 .type = UNIFIED_CACHE,
614 .level = 2,
615 .size = 2 * MiB,
616 .line_size = 64,
617 .associativity = 8,
618 };
619
620
621 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
622 static CPUCacheInfo legacy_l2_cache_amd = {
623 .type = UNIFIED_CACHE,
624 .level = 2,
625 .size = 512 * KiB,
626 .line_size = 64,
627 .lines_per_tag = 1,
628 .associativity = 16,
629 .sets = 512,
630 .partitions = 1,
631 };
632
633 /* Level 3 unified cache: */
634 static CPUCacheInfo legacy_l3_cache = {
635 .type = UNIFIED_CACHE,
636 .level = 3,
637 .size = 16 * MiB,
638 .line_size = 64,
639 .associativity = 16,
640 .sets = 16384,
641 .partitions = 1,
642 .lines_per_tag = 1,
643 .self_init = true,
644 .inclusive = true,
645 .complex_indexing = true,
646 };
647
648 /* TLB definitions: */
649
650 #define L1_DTLB_2M_ASSOC 1
651 #define L1_DTLB_2M_ENTRIES 255
652 #define L1_DTLB_4K_ASSOC 1
653 #define L1_DTLB_4K_ENTRIES 255
654
655 #define L1_ITLB_2M_ASSOC 1
656 #define L1_ITLB_2M_ENTRIES 255
657 #define L1_ITLB_4K_ASSOC 1
658 #define L1_ITLB_4K_ENTRIES 255
659
660 #define L2_DTLB_2M_ASSOC 0 /* disabled */
661 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
662 #define L2_DTLB_4K_ASSOC 4
663 #define L2_DTLB_4K_ENTRIES 512
664
665 #define L2_ITLB_2M_ASSOC 0 /* disabled */
666 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
667 #define L2_ITLB_4K_ASSOC 4
668 #define L2_ITLB_4K_ENTRIES 512
669
670 /* CPUID Leaf 0x14 constants: */
671 #define INTEL_PT_MAX_SUBLEAF 0x1
672 /*
673 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
674 * MSR can be accessed;
675 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
676 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
677 * of Intel PT MSRs across warm reset;
678 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
679 */
680 #define INTEL_PT_MINIMAL_EBX 0xf
681 /*
682 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
683 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
684 * accessed;
685 * bit[01]: ToPA tables can hold any number of output entries, up to the
686 * maximum allowed by the MaskOrTableOffset field of
687 * IA32_RTIT_OUTPUT_MASK_PTRS;
688 * bit[02]: Support Single-Range Output scheme;
689 */
690 #define INTEL_PT_MINIMAL_ECX 0x7
691 /* generated packets which contain IP payloads have LIP values */
692 #define INTEL_PT_IP_LIP (1 << 31)
693 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
694 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
695 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
696 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
697 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
698
699 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
700 uint32_t vendor2, uint32_t vendor3)
701 {
702 int i;
703 for (i = 0; i < 4; i++) {
704 dst[i] = vendor1 >> (8 * i);
705 dst[i + 4] = vendor2 >> (8 * i);
706 dst[i + 8] = vendor3 >> (8 * i);
707 }
708 dst[CPUID_VENDOR_SZ] = '\0';
709 }
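
/*
 * Worked example (an addition, not upstream code): for the Intel vendor
 * string, CPUID leaf 0 returns EBX = 0x756E6547, EDX = 0x49656E69,
 * ECX = 0x6C65746E. host_vendor_fms() below passes them in (EBX, EDX, ECX)
 * order, and the little-endian byte unpacking above yields "GenuineIntel".
 */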
710
711 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
712 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
713 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
714 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
715 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
716 CPUID_PSE36 | CPUID_FXSR)
717 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
718 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
719 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
720 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
721 CPUID_PAE | CPUID_SEP | CPUID_APIC)
722
723 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
724 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
725 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
726 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
727 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
728 /* partly implemented:
729 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
730 /* missing:
731 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
732 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
733 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
734 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
735 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
736 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR | \
737 CPUID_EXT_RDRAND)
738 /* missing:
739 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
740 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
741 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
742 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
743 CPUID_EXT_F16C */
744
745 #ifdef TARGET_X86_64
746 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
747 #else
748 #define TCG_EXT2_X86_64_FEATURES 0
749 #endif
750
751 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
752 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
753 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
754 TCG_EXT2_X86_64_FEATURES)
755 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
756 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
757 #define TCG_EXT4_FEATURES 0
758 #define TCG_SVM_FEATURES CPUID_SVM_NPT
759 #define TCG_KVM_FEATURES 0
760 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
761 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
762 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
763 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
764 CPUID_7_0_EBX_ERMS)
765 /* missing:
766 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
767 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
768 CPUID_7_0_EBX_RDSEED */
769 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \
770 /* CPUID_7_0_ECX_OSPKE is dynamic */ \
771 CPUID_7_0_ECX_LA57)
772 #define TCG_7_0_EDX_FEATURES 0
773 #define TCG_APM_FEATURES 0
774 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
775 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
776 /* missing:
777 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
778
779 typedef enum FeatureWordType {
780 CPUID_FEATURE_WORD,
781 MSR_FEATURE_WORD,
782 } FeatureWordType;
783
784 typedef struct FeatureWordInfo {
785 FeatureWordType type;
786 /* Feature flag names are taken from "Intel Processor Identification and
787 * the CPUID Instruction" and AMD's "CPUID Specification".
788 * In cases of disagreement between feature naming conventions,
789 * aliases may be added.
790 */
791 const char *feat_names[32];
792 union {
793 /* If type==CPUID_FEATURE_WORD */
794 struct {
795 uint32_t eax; /* Input EAX for CPUID */
796 bool needs_ecx; /* CPUID instruction uses ECX as input */
797 uint32_t ecx; /* Input ECX value for CPUID */
798 int reg; /* output register (R_* constant) */
799 } cpuid;
800 /* If type==MSR_FEATURE_WORD */
801 struct {
802 uint32_t index;
803 struct { /* CPUID feature bit that enumerates this MSR */
804 FeatureWord cpuid_class;
805 uint32_t cpuid_flag;
806 } cpuid_dep;
807 } msr;
808 };
809 uint32_t tcg_features; /* Feature flags supported by TCG */
810 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
811 uint32_t migratable_flags; /* Feature flags known to be migratable */
812 /* Features that shouldn't be auto-enabled by "-cpu host" */
813 uint32_t no_autoenable_flags;
814 } FeatureWordInfo;
815
816 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
817 [FEAT_1_EDX] = {
818 .type = CPUID_FEATURE_WORD,
819 .feat_names = {
820 "fpu", "vme", "de", "pse",
821 "tsc", "msr", "pae", "mce",
822 "cx8", "apic", NULL, "sep",
823 "mtrr", "pge", "mca", "cmov",
824 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
825 NULL, "ds" /* Intel dts */, "acpi", "mmx",
826 "fxsr", "sse", "sse2", "ss",
827 "ht" /* Intel htt */, "tm", "ia64", "pbe",
828 },
829 .cpuid = {.eax = 1, .reg = R_EDX, },
830 .tcg_features = TCG_FEATURES,
831 },
832 [FEAT_1_ECX] = {
833 .type = CPUID_FEATURE_WORD,
834 .feat_names = {
835 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
836 "ds-cpl", "vmx", "smx", "est",
837 "tm2", "ssse3", "cid", NULL,
838 "fma", "cx16", "xtpr", "pdcm",
839 NULL, "pcid", "dca", "sse4.1",
840 "sse4.2", "x2apic", "movbe", "popcnt",
841 "tsc-deadline", "aes", "xsave", NULL /* osxsave */,
842 "avx", "f16c", "rdrand", "hypervisor",
843 },
844 .cpuid = { .eax = 1, .reg = R_ECX, },
845 .tcg_features = TCG_EXT_FEATURES,
846 },
847 /* Feature names that are already defined in feat_names[] above but
848 * are also set in CPUID[8000_0001].EDX on AMD CPUs don't have their
849 * names repeated in feat_names below. They are copied automatically
850 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
851 */
852 [FEAT_8000_0001_EDX] = {
853 .type = CPUID_FEATURE_WORD,
854 .feat_names = {
855 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
856 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
857 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
858 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
859 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
860 "nx", NULL, "mmxext", NULL /* mmx */,
861 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
862 NULL, "lm", "3dnowext", "3dnow",
863 },
864 .cpuid = { .eax = 0x80000001, .reg = R_EDX, },
865 .tcg_features = TCG_EXT2_FEATURES,
866 },
867 [FEAT_8000_0001_ECX] = {
868 .type = CPUID_FEATURE_WORD,
869 .feat_names = {
870 "lahf-lm", "cmp-legacy", "svm", "extapic",
871 "cr8legacy", "abm", "sse4a", "misalignsse",
872 "3dnowprefetch", "osvw", "ibs", "xop",
873 "skinit", "wdt", NULL, "lwp",
874 "fma4", "tce", NULL, "nodeid-msr",
875 NULL, "tbm", "topoext", "perfctr-core",
876 "perfctr-nb", NULL, NULL, NULL,
877 NULL, NULL, NULL, NULL,
878 },
879 .cpuid = { .eax = 0x80000001, .reg = R_ECX, },
880 .tcg_features = TCG_EXT3_FEATURES,
881 /*
882 * TOPOEXT is always allowed but can't be enabled blindly by
883 * "-cpu host", as it requires consistent cache topology info
884 * to be provided so it doesn't confuse guests.
885 */
886 .no_autoenable_flags = CPUID_EXT3_TOPOEXT,
887 },
888 [FEAT_C000_0001_EDX] = {
889 .type = CPUID_FEATURE_WORD,
890 .feat_names = {
891 NULL, NULL, "xstore", "xstore-en",
892 NULL, NULL, "xcrypt", "xcrypt-en",
893 "ace2", "ace2-en", "phe", "phe-en",
894 "pmm", "pmm-en", NULL, NULL,
895 NULL, NULL, NULL, NULL,
896 NULL, NULL, NULL, NULL,
897 NULL, NULL, NULL, NULL,
898 NULL, NULL, NULL, NULL,
899 },
900 .cpuid = { .eax = 0xC0000001, .reg = R_EDX, },
901 .tcg_features = TCG_EXT4_FEATURES,
902 },
903 [FEAT_KVM] = {
904 .type = CPUID_FEATURE_WORD,
905 .feat_names = {
906 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
907 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
908 NULL, "kvm-pv-tlb-flush", NULL, "kvm-pv-ipi",
909 NULL, NULL, NULL, NULL,
910 NULL, NULL, NULL, NULL,
911 NULL, NULL, NULL, NULL,
912 "kvmclock-stable-bit", NULL, NULL, NULL,
913 NULL, NULL, NULL, NULL,
914 },
915 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EAX, },
916 .tcg_features = TCG_KVM_FEATURES,
917 },
918 [FEAT_KVM_HINTS] = {
919 .type = CPUID_FEATURE_WORD,
920 .feat_names = {
921 "kvm-hint-dedicated", NULL, NULL, NULL,
922 NULL, NULL, NULL, NULL,
923 NULL, NULL, NULL, NULL,
924 NULL, NULL, NULL, NULL,
925 NULL, NULL, NULL, NULL,
926 NULL, NULL, NULL, NULL,
927 NULL, NULL, NULL, NULL,
928 NULL, NULL, NULL, NULL,
929 },
930 .cpuid = { .eax = KVM_CPUID_FEATURES, .reg = R_EDX, },
931 .tcg_features = TCG_KVM_FEATURES,
932 /*
933 * KVM hints aren't auto-enabled by -cpu host; they need to be
934 * explicitly enabled on the command line.
935 */
936 .no_autoenable_flags = ~0U,
937 },
938 /*
939 * .feat_names are commented out for Hyper-V enlightenments because we
940 * don't want to have two different ways of enabling them on the QEMU
941 * command line. Some features (e.g. "hyperv_time", "hyperv_vapic", ...)
942 * require enabling several feature bits simultaneously, and exposing
943 * these bits individually may just confuse guests.
944 */
945 [FEAT_HYPERV_EAX] = {
946 .type = CPUID_FEATURE_WORD,
947 .feat_names = {
948 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
949 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
950 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
951 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
952 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
953 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
954 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
955 NULL, NULL,
956 NULL, NULL, NULL, NULL,
957 NULL, NULL, NULL, NULL,
958 NULL, NULL, NULL, NULL,
959 NULL, NULL, NULL, NULL,
960 },
961 .cpuid = { .eax = 0x40000003, .reg = R_EAX, },
962 },
963 [FEAT_HYPERV_EBX] = {
964 .type = CPUID_FEATURE_WORD,
965 .feat_names = {
966 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
967 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
968 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
969 NULL /* hv_create_port */, NULL /* hv_connect_port */,
970 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
971 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
972 NULL, NULL,
973 NULL, NULL, NULL, NULL,
974 NULL, NULL, NULL, NULL,
975 NULL, NULL, NULL, NULL,
976 NULL, NULL, NULL, NULL,
977 },
978 .cpuid = { .eax = 0x40000003, .reg = R_EBX, },
979 },
980 [FEAT_HYPERV_EDX] = {
981 .type = CPUID_FEATURE_WORD,
982 .feat_names = {
983 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
984 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
985 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
986 NULL, NULL,
987 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
988 NULL, NULL, NULL, NULL,
989 NULL, NULL, NULL, NULL,
990 NULL, NULL, NULL, NULL,
991 NULL, NULL, NULL, NULL,
992 NULL, NULL, NULL, NULL,
993 },
994 .cpuid = { .eax = 0x40000003, .reg = R_EDX, },
995 },
996 [FEAT_HV_RECOMM_EAX] = {
997 .type = CPUID_FEATURE_WORD,
998 .feat_names = {
999 NULL /* hv_recommend_pv_as_switch */,
1000 NULL /* hv_recommend_pv_tlbflush_local */,
1001 NULL /* hv_recommend_pv_tlbflush_remote */,
1002 NULL /* hv_recommend_msr_apic_access */,
1003 NULL /* hv_recommend_msr_reset */,
1004 NULL /* hv_recommend_relaxed_timing */,
1005 NULL /* hv_recommend_dma_remapping */,
1006 NULL /* hv_recommend_int_remapping */,
1007 NULL /* hv_recommend_x2apic_msrs */,
1008 NULL /* hv_recommend_autoeoi_deprecation */,
1009 NULL /* hv_recommend_pv_ipi */,
1010 NULL /* hv_recommend_ex_hypercalls */,
1011 NULL /* hv_hypervisor_is_nested */,
1012 NULL /* hv_recommend_int_mbec */,
1013 NULL /* hv_recommend_evmcs */,
1014 NULL,
1015 NULL, NULL, NULL, NULL,
1016 NULL, NULL, NULL, NULL,
1017 NULL, NULL, NULL, NULL,
1018 NULL, NULL, NULL, NULL,
1019 },
1020 .cpuid = { .eax = 0x40000004, .reg = R_EAX, },
1021 },
1022 [FEAT_HV_NESTED_EAX] = {
1023 .type = CPUID_FEATURE_WORD,
1024 .cpuid = { .eax = 0x4000000A, .reg = R_EAX, },
1025 },
1026 [FEAT_SVM] = {
1027 .type = CPUID_FEATURE_WORD,
1028 .feat_names = {
1029 "npt", "lbrv", "svm-lock", "nrip-save",
1030 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
1031 NULL, NULL, "pause-filter", NULL,
1032 "pfthreshold", NULL, NULL, NULL,
1033 NULL, NULL, NULL, NULL,
1034 NULL, NULL, NULL, NULL,
1035 NULL, NULL, NULL, NULL,
1036 NULL, NULL, NULL, NULL,
1037 },
1038 .cpuid = { .eax = 0x8000000A, .reg = R_EDX, },
1039 .tcg_features = TCG_SVM_FEATURES,
1040 },
1041 [FEAT_7_0_EBX] = {
1042 .type = CPUID_FEATURE_WORD,
1043 .feat_names = {
1044 "fsgsbase", "tsc-adjust", NULL, "bmi1",
1045 "hle", "avx2", NULL, "smep",
1046 "bmi2", "erms", "invpcid", "rtm",
1047 NULL, NULL, "mpx", NULL,
1048 "avx512f", "avx512dq", "rdseed", "adx",
1049 "smap", "avx512ifma", "pcommit", "clflushopt",
1050 "clwb", "intel-pt", "avx512pf", "avx512er",
1051 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
1052 },
1053 .cpuid = {
1054 .eax = 7,
1055 .needs_ecx = true, .ecx = 0,
1056 .reg = R_EBX,
1057 },
1058 .tcg_features = TCG_7_0_EBX_FEATURES,
1059 },
1060 [FEAT_7_0_ECX] = {
1061 .type = CPUID_FEATURE_WORD,
1062 .feat_names = {
1063 NULL, "avx512vbmi", "umip", "pku",
1064 NULL /* ospke */, NULL, "avx512vbmi2", NULL,
1065 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
1066 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
1067 "la57", NULL, NULL, NULL,
1068 NULL, NULL, "rdpid", NULL,
1069 NULL, "cldemote", NULL, "movdiri",
1070 "movdir64b", NULL, NULL, NULL,
1071 },
1072 .cpuid = {
1073 .eax = 7,
1074 .needs_ecx = true, .ecx = 0,
1075 .reg = R_ECX,
1076 },
1077 .tcg_features = TCG_7_0_ECX_FEATURES,
1078 },
1079 [FEAT_7_0_EDX] = {
1080 .type = CPUID_FEATURE_WORD,
1081 .feat_names = {
1082 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
1083 NULL, NULL, NULL, NULL,
1084 NULL, NULL, "md-clear", NULL,
1085 NULL, NULL, NULL, NULL,
1086 NULL, NULL, NULL, NULL,
1087 NULL, NULL, NULL, NULL,
1088 NULL, NULL, "spec-ctrl", "stibp",
1089 NULL, "arch-capabilities", "core-capability", "ssbd",
1090 },
1091 .cpuid = {
1092 .eax = 7,
1093 .needs_ecx = true, .ecx = 0,
1094 .reg = R_EDX,
1095 },
1096 .tcg_features = TCG_7_0_EDX_FEATURES,
1097 },
1098 [FEAT_8000_0007_EDX] = {
1099 .type = CPUID_FEATURE_WORD,
1100 .feat_names = {
1101 NULL, NULL, NULL, NULL,
1102 NULL, NULL, NULL, NULL,
1103 "invtsc", NULL, NULL, NULL,
1104 NULL, NULL, NULL, NULL,
1105 NULL, NULL, NULL, NULL,
1106 NULL, NULL, NULL, NULL,
1107 NULL, NULL, NULL, NULL,
1108 NULL, NULL, NULL, NULL,
1109 },
1110 .cpuid = { .eax = 0x80000007, .reg = R_EDX, },
1111 .tcg_features = TCG_APM_FEATURES,
1112 .unmigratable_flags = CPUID_APM_INVTSC,
1113 },
1114 [FEAT_8000_0008_EBX] = {
1115 .type = CPUID_FEATURE_WORD,
1116 .feat_names = {
1117 NULL, NULL, NULL, NULL,
1118 NULL, NULL, NULL, NULL,
1119 NULL, "wbnoinvd", NULL, NULL,
1120 "ibpb", NULL, NULL, NULL,
1121 NULL, NULL, NULL, NULL,
1122 NULL, NULL, NULL, NULL,
1123 "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL,
1124 NULL, NULL, NULL, NULL,
1125 },
1126 .cpuid = { .eax = 0x80000008, .reg = R_EBX, },
1127 .tcg_features = 0,
1128 .unmigratable_flags = 0,
1129 },
1130 [FEAT_XSAVE] = {
1131 .type = CPUID_FEATURE_WORD,
1132 .feat_names = {
1133 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
1134 NULL, NULL, NULL, NULL,
1135 NULL, NULL, NULL, NULL,
1136 NULL, NULL, NULL, NULL,
1137 NULL, NULL, NULL, NULL,
1138 NULL, NULL, NULL, NULL,
1139 NULL, NULL, NULL, NULL,
1140 NULL, NULL, NULL, NULL,
1141 },
1142 .cpuid = {
1143 .eax = 0xd,
1144 .needs_ecx = true, .ecx = 1,
1145 .reg = R_EAX,
1146 },
1147 .tcg_features = TCG_XSAVE_FEATURES,
1148 },
1149 [FEAT_6_EAX] = {
1150 .type = CPUID_FEATURE_WORD,
1151 .feat_names = {
1152 NULL, NULL, "arat", NULL,
1153 NULL, NULL, NULL, NULL,
1154 NULL, NULL, NULL, NULL,
1155 NULL, NULL, NULL, NULL,
1156 NULL, NULL, NULL, NULL,
1157 NULL, NULL, NULL, NULL,
1158 NULL, NULL, NULL, NULL,
1159 NULL, NULL, NULL, NULL,
1160 },
1161 .cpuid = { .eax = 6, .reg = R_EAX, },
1162 .tcg_features = TCG_6_EAX_FEATURES,
1163 },
1164 [FEAT_XSAVE_COMP_LO] = {
1165 .type = CPUID_FEATURE_WORD,
1166 .cpuid = {
1167 .eax = 0xD,
1168 .needs_ecx = true, .ecx = 0,
1169 .reg = R_EAX,
1170 },
1171 .tcg_features = ~0U,
1172 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
1173 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
1174 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
1175 XSTATE_PKRU_MASK,
1176 },
1177 [FEAT_XSAVE_COMP_HI] = {
1178 .type = CPUID_FEATURE_WORD,
1179 .cpuid = {
1180 .eax = 0xD,
1181 .needs_ecx = true, .ecx = 0,
1182 .reg = R_EDX,
1183 },
1184 .tcg_features = ~0U,
1185 },
1186 /* Below are MSR-exposed features */
1187 [FEAT_ARCH_CAPABILITIES] = {
1188 .type = MSR_FEATURE_WORD,
1189 .feat_names = {
1190 "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry",
1191 "ssb-no", "mds-no", NULL, NULL,
1192 NULL, NULL, NULL, NULL,
1193 NULL, NULL, NULL, NULL,
1194 NULL, NULL, NULL, NULL,
1195 NULL, NULL, NULL, NULL,
1196 NULL, NULL, NULL, NULL,
1197 NULL, NULL, NULL, NULL,
1198 },
1199 .msr = {
1200 .index = MSR_IA32_ARCH_CAPABILITIES,
1201 .cpuid_dep = {
1202 FEAT_7_0_EDX,
1203 CPUID_7_0_EDX_ARCH_CAPABILITIES
1204 }
1205 },
1206 },
1207 [FEAT_CORE_CAPABILITY] = {
1208 .type = MSR_FEATURE_WORD,
1209 .feat_names = {
1210 NULL, NULL, NULL, NULL,
1211 NULL, "split-lock-detect", NULL, NULL,
1212 NULL, NULL, NULL, NULL,
1213 NULL, NULL, NULL, NULL,
1214 NULL, NULL, NULL, NULL,
1215 NULL, NULL, NULL, NULL,
1216 NULL, NULL, NULL, NULL,
1217 NULL, NULL, NULL, NULL,
1218 },
1219 .msr = {
1220 .index = MSR_IA32_CORE_CAPABILITY,
1221 .cpuid_dep = {
1222 FEAT_7_0_EDX,
1223 CPUID_7_0_EDX_CORE_CAPABILITY,
1224 },
1225 },
1226 },
1227 };
1228
1229 typedef struct X86RegisterInfo32 {
1230 /* Name of register */
1231 const char *name;
1232 /* QAPI enum value for this register */
1233 X86CPURegister32 qapi_enum;
1234 } X86RegisterInfo32;
1235
1236 #define REGISTER(reg) \
1237 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
1238 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
1239 REGISTER(EAX),
1240 REGISTER(ECX),
1241 REGISTER(EDX),
1242 REGISTER(EBX),
1243 REGISTER(ESP),
1244 REGISTER(EBP),
1245 REGISTER(ESI),
1246 REGISTER(EDI),
1247 };
1248 #undef REGISTER
1249
1250 typedef struct ExtSaveArea {
1251 uint32_t feature, bits;
1252 uint32_t offset, size;
1253 } ExtSaveArea;
1254
1255 static const ExtSaveArea x86_ext_save_areas[] = {
1256 [XSTATE_FP_BIT] = {
1257 /* x87 FP state component is always enabled if XSAVE is supported */
1258 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1259 /* x87 state is in the legacy region of the XSAVE area */
1260 .offset = 0,
1261 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1262 },
1263 [XSTATE_SSE_BIT] = {
1264 /* SSE state component is always enabled if XSAVE is supported */
1265 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
1266 /* SSE state is in the legacy region of the XSAVE area */
1267 .offset = 0,
1268 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
1269 },
1270 [XSTATE_YMM_BIT] =
1271 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
1272 .offset = offsetof(X86XSaveArea, avx_state),
1273 .size = sizeof(XSaveAVX) },
1274 [XSTATE_BNDREGS_BIT] =
1275 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1276 .offset = offsetof(X86XSaveArea, bndreg_state),
1277 .size = sizeof(XSaveBNDREG) },
1278 [XSTATE_BNDCSR_BIT] =
1279 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
1280 .offset = offsetof(X86XSaveArea, bndcsr_state),
1281 .size = sizeof(XSaveBNDCSR) },
1282 [XSTATE_OPMASK_BIT] =
1283 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1284 .offset = offsetof(X86XSaveArea, opmask_state),
1285 .size = sizeof(XSaveOpmask) },
1286 [XSTATE_ZMM_Hi256_BIT] =
1287 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1288 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
1289 .size = sizeof(XSaveZMM_Hi256) },
1290 [XSTATE_Hi16_ZMM_BIT] =
1291 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
1292 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
1293 .size = sizeof(XSaveHi16_ZMM) },
1294 [XSTATE_PKRU_BIT] =
1295 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
1296 .offset = offsetof(X86XSaveArea, pkru_state),
1297 .size = sizeof(XSavePKRU) },
1298 };
1299
1300 static uint32_t xsave_area_size(uint64_t mask)
1301 {
1302 int i;
1303 uint64_t ret = 0;
1304
1305 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
1306 const ExtSaveArea *esa = &x86_ext_save_areas[i];
1307 if ((mask >> i) & 1) {
1308 ret = MAX(ret, esa->offset + esa->size);
1309 }
1310 }
1311 return ret;
1312 }
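
/*
 * Illustrative example (an addition, not upstream code):
 * xsave_area_size(XSTATE_FP_MASK | XSTATE_SSE_MASK) covers only the two
 * legacy components, both of which sit at offset 0 with size
 * sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), i.e. the architectural
 * 512-byte legacy region plus the 64-byte XSAVE header (576 bytes). Adding
 * XSTATE_YMM_MASK would extend the result to
 * offsetof(X86XSaveArea, avx_state) + sizeof(XSaveAVX).
 */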
1313
1314 static inline bool accel_uses_host_cpuid(void)
1315 {
1316 return kvm_enabled() || hvf_enabled();
1317 }
1318
1319 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
1320 {
1321 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
1322 cpu->env.features[FEAT_XSAVE_COMP_LO];
1323 }
1324
1325 const char *get_register_name_32(unsigned int reg)
1326 {
1327 if (reg >= CPU_NB_REGS32) {
1328 return NULL;
1329 }
1330 return x86_reg_info_32[reg].name;
1331 }
1332
1333 /*
1334 * Returns the set of feature flags that are supported and migratable by
1335 * QEMU, for a given FeatureWord.
1336 */
1337 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1338 {
1339 FeatureWordInfo *wi = &feature_word_info[w];
1340 uint32_t r = 0;
1341 int i;
1342
1343 for (i = 0; i < 32; i++) {
1344 uint32_t f = 1U << i;
1345
1346 /* If the feature name is known, it is implicitly considered migratable,
1347 * unless it is explicitly set in unmigratable_flags */
1348 if ((wi->migratable_flags & f) ||
1349 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1350 r |= f;
1351 }
1352 }
1353 return r;
1354 }
1355
1356 void host_cpuid(uint32_t function, uint32_t count,
1357 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1358 {
1359 uint32_t vec[4];
1360
1361 #ifdef __x86_64__
1362 asm volatile("cpuid"
1363 : "=a"(vec[0]), "=b"(vec[1]),
1364 "=c"(vec[2]), "=d"(vec[3])
1365 : "0"(function), "c"(count) : "cc");
1366 #elif defined(__i386__)
1367 asm volatile("pusha \n\t"
1368 "cpuid \n\t"
1369 "mov %%eax, 0(%2) \n\t"
1370 "mov %%ebx, 4(%2) \n\t"
1371 "mov %%ecx, 8(%2) \n\t"
1372 "mov %%edx, 12(%2) \n\t"
1373 "popa"
1374 : : "a"(function), "c"(count), "S"(vec)
1375 : "memory", "cc");
1376 #else
1377 abort();
1378 #endif
1379
1380 if (eax)
1381 *eax = vec[0];
1382 if (ebx)
1383 *ebx = vec[1];
1384 if (ecx)
1385 *ecx = vec[2];
1386 if (edx)
1387 *edx = vec[3];
1388 }
1389
1390 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1391 {
1392 uint32_t eax, ebx, ecx, edx;
1393
1394 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1395 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1396
1397 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1398 if (family) {
1399 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1400 }
1401 if (model) {
1402 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1403 }
1404 if (stepping) {
1405 *stepping = eax & 0x0F;
1406 }
1407 }
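
/*
 * Worked example (an addition, not upstream code): a host whose CPUID.1.EAX
 * reads 0x000806EA decodes as
 *
 *     family   = ((0x000806EA >> 8) & 0x0F) + ((0x000806EA >> 20) & 0xFF)
 *              = 6 + 0 = 6
 *     model    = ((0x000806EA >> 4) & 0x0F) | ((0x000806EA & 0xF0000) >> 12)
 *              = 0x0E | 0x80 = 0x8E (142)
 *     stepping = 0x000806EA & 0x0F = 10
 */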
1408
1409 /* CPU class name definitions: */
1410
1411 /* Return the type name for a given CPU model name.
1412 * The caller is responsible for freeing the returned string.
1413 */
1414 static char *x86_cpu_type_name(const char *model_name)
1415 {
1416 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
1417 }
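
/*
 * Illustrative example (an addition, not upstream code): assuming
 * X86_CPU_TYPE_NAME() appends the target-specific X86_CPU_TYPE_SUFFIX used
 * by the class-name helpers below, x86_cpu_type_name("qemu64") returns a
 * string such as "qemu64-x86_64-cpu" on a 64-bit target (or
 * "qemu64-i386-cpu" on i386), which the caller must g_free().
 */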
1418
1419 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1420 {
1421 ObjectClass *oc;
1422 char *typename = x86_cpu_type_name(cpu_model);
1423 oc = object_class_by_name(typename);
1424 g_free(typename);
1425 return oc;
1426 }
1427
1428 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1429 {
1430 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1431 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1432 return g_strndup(class_name,
1433 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1434 }
1435
1436 struct X86CPUDefinition {
1437 const char *name;
1438 uint32_t level;
1439 uint32_t xlevel;
1440 /* vendor is a zero-terminated, 12-character ASCII string */
1441 char vendor[CPUID_VENDOR_SZ + 1];
1442 int family;
1443 int model;
1444 int stepping;
1445 FeatureWordArray features;
1446 const char *model_id;
1447 CPUCaches *cache_info;
1448 };
1449
1450 static CPUCaches epyc_cache_info = {
1451 .l1d_cache = &(CPUCacheInfo) {
1452 .type = DATA_CACHE,
1453 .level = 1,
1454 .size = 32 * KiB,
1455 .line_size = 64,
1456 .associativity = 8,
1457 .partitions = 1,
1458 .sets = 64,
1459 .lines_per_tag = 1,
1460 .self_init = 1,
1461 .no_invd_sharing = true,
1462 },
1463 .l1i_cache = &(CPUCacheInfo) {
1464 .type = INSTRUCTION_CACHE,
1465 .level = 1,
1466 .size = 64 * KiB,
1467 .line_size = 64,
1468 .associativity = 4,
1469 .partitions = 1,
1470 .sets = 256,
1471 .lines_per_tag = 1,
1472 .self_init = 1,
1473 .no_invd_sharing = true,
1474 },
1475 .l2_cache = &(CPUCacheInfo) {
1476 .type = UNIFIED_CACHE,
1477 .level = 2,
1478 .size = 512 * KiB,
1479 .line_size = 64,
1480 .associativity = 8,
1481 .partitions = 1,
1482 .sets = 1024,
1483 .lines_per_tag = 1,
1484 },
1485 .l3_cache = &(CPUCacheInfo) {
1486 .type = UNIFIED_CACHE,
1487 .level = 3,
1488 .size = 8 * MiB,
1489 .line_size = 64,
1490 .associativity = 16,
1491 .partitions = 1,
1492 .sets = 8192,
1493 .lines_per_tag = 1,
1494 .self_init = true,
1495 .inclusive = true,
1496 .complex_indexing = true,
1497 },
1498 };
1499
1500 static X86CPUDefinition builtin_x86_defs[] = {
1501 {
1502 .name = "qemu64",
1503 .level = 0xd,
1504 .vendor = CPUID_VENDOR_AMD,
1505 .family = 6,
1506 .model = 6,
1507 .stepping = 3,
1508 .features[FEAT_1_EDX] =
1509 PPRO_FEATURES |
1510 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1511 CPUID_PSE36,
1512 .features[FEAT_1_ECX] =
1513 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1514 .features[FEAT_8000_0001_EDX] =
1515 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1516 .features[FEAT_8000_0001_ECX] =
1517 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1518 .xlevel = 0x8000000A,
1519 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1520 },
1521 {
1522 .name = "phenom",
1523 .level = 5,
1524 .vendor = CPUID_VENDOR_AMD,
1525 .family = 16,
1526 .model = 2,
1527 .stepping = 3,
1528 /* Missing: CPUID_HT */
1529 .features[FEAT_1_EDX] =
1530 PPRO_FEATURES |
1531 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1532 CPUID_PSE36 | CPUID_VME,
1533 .features[FEAT_1_ECX] =
1534 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1535 CPUID_EXT_POPCNT,
1536 .features[FEAT_8000_0001_EDX] =
1537 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1538 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1539 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1540 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1541 CPUID_EXT3_CR8LEG,
1542 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1543 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1544 .features[FEAT_8000_0001_ECX] =
1545 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1546 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1547 /* Missing: CPUID_SVM_LBRV */
1548 .features[FEAT_SVM] =
1549 CPUID_SVM_NPT,
1550 .xlevel = 0x8000001A,
1551 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1552 },
1553 {
1554 .name = "core2duo",
1555 .level = 10,
1556 .vendor = CPUID_VENDOR_INTEL,
1557 .family = 6,
1558 .model = 15,
1559 .stepping = 11,
1560 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1561 .features[FEAT_1_EDX] =
1562 PPRO_FEATURES |
1563 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1564 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1565 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1566 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1567 .features[FEAT_1_ECX] =
1568 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1569 CPUID_EXT_CX16,
1570 .features[FEAT_8000_0001_EDX] =
1571 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1572 .features[FEAT_8000_0001_ECX] =
1573 CPUID_EXT3_LAHF_LM,
1574 .xlevel = 0x80000008,
1575 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1576 },
1577 {
1578 .name = "kvm64",
1579 .level = 0xd,
1580 .vendor = CPUID_VENDOR_INTEL,
1581 .family = 15,
1582 .model = 6,
1583 .stepping = 1,
1584 /* Missing: CPUID_HT */
1585 .features[FEAT_1_EDX] =
1586 PPRO_FEATURES | CPUID_VME |
1587 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1588 CPUID_PSE36,
1589 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1590 .features[FEAT_1_ECX] =
1591 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1592 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1593 .features[FEAT_8000_0001_EDX] =
1594 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1595 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1596 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1597 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1598 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1599 .features[FEAT_8000_0001_ECX] =
1600 0,
1601 .xlevel = 0x80000008,
1602 .model_id = "Common KVM processor"
1603 },
1604 {
1605 .name = "qemu32",
1606 .level = 4,
1607 .vendor = CPUID_VENDOR_INTEL,
1608 .family = 6,
1609 .model = 6,
1610 .stepping = 3,
1611 .features[FEAT_1_EDX] =
1612 PPRO_FEATURES,
1613 .features[FEAT_1_ECX] =
1614 CPUID_EXT_SSE3,
1615 .xlevel = 0x80000004,
1616 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1617 },
1618 {
1619 .name = "kvm32",
1620 .level = 5,
1621 .vendor = CPUID_VENDOR_INTEL,
1622 .family = 15,
1623 .model = 6,
1624 .stepping = 1,
1625 .features[FEAT_1_EDX] =
1626 PPRO_FEATURES | CPUID_VME |
1627 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1628 .features[FEAT_1_ECX] =
1629 CPUID_EXT_SSE3,
1630 .features[FEAT_8000_0001_ECX] =
1631 0,
1632 .xlevel = 0x80000008,
1633 .model_id = "Common 32-bit KVM processor"
1634 },
1635 {
1636 .name = "coreduo",
1637 .level = 10,
1638 .vendor = CPUID_VENDOR_INTEL,
1639 .family = 6,
1640 .model = 14,
1641 .stepping = 8,
1642 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1643 .features[FEAT_1_EDX] =
1644 PPRO_FEATURES | CPUID_VME |
1645 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1646 CPUID_SS,
1647 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
1648 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1649 .features[FEAT_1_ECX] =
1650 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1651 .features[FEAT_8000_0001_EDX] =
1652 CPUID_EXT2_NX,
1653 .xlevel = 0x80000008,
1654 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1655 },
1656 {
1657 .name = "486",
1658 .level = 1,
1659 .vendor = CPUID_VENDOR_INTEL,
1660 .family = 4,
1661 .model = 8,
1662 .stepping = 0,
1663 .features[FEAT_1_EDX] =
1664 I486_FEATURES,
1665 .xlevel = 0,
1666 .model_id = "",
1667 },
1668 {
1669 .name = "pentium",
1670 .level = 1,
1671 .vendor = CPUID_VENDOR_INTEL,
1672 .family = 5,
1673 .model = 4,
1674 .stepping = 3,
1675 .features[FEAT_1_EDX] =
1676 PENTIUM_FEATURES,
1677 .xlevel = 0,
1678 .model_id = "",
1679 },
1680 {
1681 .name = "pentium2",
1682 .level = 2,
1683 .vendor = CPUID_VENDOR_INTEL,
1684 .family = 6,
1685 .model = 5,
1686 .stepping = 2,
1687 .features[FEAT_1_EDX] =
1688 PENTIUM2_FEATURES,
1689 .xlevel = 0,
1690 .model_id = "",
1691 },
1692 {
1693 .name = "pentium3",
1694 .level = 3,
1695 .vendor = CPUID_VENDOR_INTEL,
1696 .family = 6,
1697 .model = 7,
1698 .stepping = 3,
1699 .features[FEAT_1_EDX] =
1700 PENTIUM3_FEATURES,
1701 .xlevel = 0,
1702 .model_id = "",
1703 },
1704 {
1705 .name = "athlon",
1706 .level = 2,
1707 .vendor = CPUID_VENDOR_AMD,
1708 .family = 6,
1709 .model = 2,
1710 .stepping = 3,
1711 .features[FEAT_1_EDX] =
1712 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1713 CPUID_MCA,
1714 .features[FEAT_8000_0001_EDX] =
1715 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1716 .xlevel = 0x80000008,
1717 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1718 },
1719 {
1720 .name = "n270",
1721 .level = 10,
1722 .vendor = CPUID_VENDOR_INTEL,
1723 .family = 6,
1724 .model = 28,
1725 .stepping = 2,
1726 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1727 .features[FEAT_1_EDX] =
1728 PPRO_FEATURES |
1729 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1730 CPUID_ACPI | CPUID_SS,
1731 /* Some CPUs lack CPUID_SEP */
1732 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1733 * CPUID_EXT_XTPR */
1734 .features[FEAT_1_ECX] =
1735 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1736 CPUID_EXT_MOVBE,
1737 .features[FEAT_8000_0001_EDX] =
1738 CPUID_EXT2_NX,
1739 .features[FEAT_8000_0001_ECX] =
1740 CPUID_EXT3_LAHF_LM,
1741 .xlevel = 0x80000008,
1742 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1743 },
1744 {
1745 .name = "Conroe",
1746 .level = 10,
1747 .vendor = CPUID_VENDOR_INTEL,
1748 .family = 6,
1749 .model = 15,
1750 .stepping = 3,
1751 .features[FEAT_1_EDX] =
1752 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1753 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1754 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1755 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1756 CPUID_DE | CPUID_FP87,
1757 .features[FEAT_1_ECX] =
1758 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1759 .features[FEAT_8000_0001_EDX] =
1760 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1761 .features[FEAT_8000_0001_ECX] =
1762 CPUID_EXT3_LAHF_LM,
1763 .xlevel = 0x80000008,
1764 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1765 },
1766 {
1767 .name = "Penryn",
1768 .level = 10,
1769 .vendor = CPUID_VENDOR_INTEL,
1770 .family = 6,
1771 .model = 23,
1772 .stepping = 3,
1773 .features[FEAT_1_EDX] =
1774 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1775 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1776 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1777 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1778 CPUID_DE | CPUID_FP87,
1779 .features[FEAT_1_ECX] =
1780 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1781 CPUID_EXT_SSE3,
1782 .features[FEAT_8000_0001_EDX] =
1783 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1784 .features[FEAT_8000_0001_ECX] =
1785 CPUID_EXT3_LAHF_LM,
1786 .xlevel = 0x80000008,
1787 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1788 },
1789 {
1790 .name = "Nehalem",
1791 .level = 11,
1792 .vendor = CPUID_VENDOR_INTEL,
1793 .family = 6,
1794 .model = 26,
1795 .stepping = 3,
1796 .features[FEAT_1_EDX] =
1797 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1798 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1799 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1800 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1801 CPUID_DE | CPUID_FP87,
1802 .features[FEAT_1_ECX] =
1803 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1804 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1805 .features[FEAT_8000_0001_EDX] =
1806 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1807 .features[FEAT_8000_0001_ECX] =
1808 CPUID_EXT3_LAHF_LM,
1809 .xlevel = 0x80000008,
1810 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1811 },
1812 {
1813 .name = "Nehalem-IBRS",
1814 .level = 11,
1815 .vendor = CPUID_VENDOR_INTEL,
1816 .family = 6,
1817 .model = 26,
1818 .stepping = 3,
1819 .features[FEAT_1_EDX] =
1820 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1821 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1822 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1823 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1824 CPUID_DE | CPUID_FP87,
1825 .features[FEAT_1_ECX] =
1826 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1827 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1828 .features[FEAT_7_0_EDX] =
1829 CPUID_7_0_EDX_SPEC_CTRL,
1830 .features[FEAT_8000_0001_EDX] =
1831 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1832 .features[FEAT_8000_0001_ECX] =
1833 CPUID_EXT3_LAHF_LM,
1834 .xlevel = 0x80000008,
1835 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1836 },
1837 {
1838 .name = "Westmere",
1839 .level = 11,
1840 .vendor = CPUID_VENDOR_INTEL,
1841 .family = 6,
1842 .model = 44,
1843 .stepping = 1,
1844 .features[FEAT_1_EDX] =
1845 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1846 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1847 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1848 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1849 CPUID_DE | CPUID_FP87,
1850 .features[FEAT_1_ECX] =
1851 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1852 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1853 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1854 .features[FEAT_8000_0001_EDX] =
1855 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1856 .features[FEAT_8000_0001_ECX] =
1857 CPUID_EXT3_LAHF_LM,
1858 .features[FEAT_6_EAX] =
1859 CPUID_6_EAX_ARAT,
1860 .xlevel = 0x80000008,
1861 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1862 },
1863 {
1864 .name = "Westmere-IBRS",
1865 .level = 11,
1866 .vendor = CPUID_VENDOR_INTEL,
1867 .family = 6,
1868 .model = 44,
1869 .stepping = 1,
1870 .features[FEAT_1_EDX] =
1871 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1872 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1873 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1874 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1875 CPUID_DE | CPUID_FP87,
1876 .features[FEAT_1_ECX] =
1877 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1878 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1879 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1880 .features[FEAT_8000_0001_EDX] =
1881 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1882 .features[FEAT_8000_0001_ECX] =
1883 CPUID_EXT3_LAHF_LM,
1884 .features[FEAT_7_0_EDX] =
1885 CPUID_7_0_EDX_SPEC_CTRL,
1886 .features[FEAT_6_EAX] =
1887 CPUID_6_EAX_ARAT,
1888 .xlevel = 0x80000008,
1889 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1890 },
1891 {
1892 .name = "SandyBridge",
1893 .level = 0xd,
1894 .vendor = CPUID_VENDOR_INTEL,
1895 .family = 6,
1896 .model = 42,
1897 .stepping = 1,
1898 .features[FEAT_1_EDX] =
1899 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1900 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1901 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1902 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1903 CPUID_DE | CPUID_FP87,
1904 .features[FEAT_1_ECX] =
1905 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1906 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1907 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1908 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1909 CPUID_EXT_SSE3,
1910 .features[FEAT_8000_0001_EDX] =
1911 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1912 CPUID_EXT2_SYSCALL,
1913 .features[FEAT_8000_0001_ECX] =
1914 CPUID_EXT3_LAHF_LM,
1915 .features[FEAT_XSAVE] =
1916 CPUID_XSAVE_XSAVEOPT,
1917 .features[FEAT_6_EAX] =
1918 CPUID_6_EAX_ARAT,
1919 .xlevel = 0x80000008,
1920 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1921 },
1922 {
1923 .name = "SandyBridge-IBRS",
1924 .level = 0xd,
1925 .vendor = CPUID_VENDOR_INTEL,
1926 .family = 6,
1927 .model = 42,
1928 .stepping = 1,
1929 .features[FEAT_1_EDX] =
1930 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1931 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1932 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1933 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1934 CPUID_DE | CPUID_FP87,
1935 .features[FEAT_1_ECX] =
1936 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1937 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1938 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1939 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1940 CPUID_EXT_SSE3,
1941 .features[FEAT_8000_0001_EDX] =
1942 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1943 CPUID_EXT2_SYSCALL,
1944 .features[FEAT_8000_0001_ECX] =
1945 CPUID_EXT3_LAHF_LM,
1946 .features[FEAT_7_0_EDX] =
1947 CPUID_7_0_EDX_SPEC_CTRL,
1948 .features[FEAT_XSAVE] =
1949 CPUID_XSAVE_XSAVEOPT,
1950 .features[FEAT_6_EAX] =
1951 CPUID_6_EAX_ARAT,
1952 .xlevel = 0x80000008,
1953 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1954 },
1955 {
1956 .name = "IvyBridge",
1957 .level = 0xd,
1958 .vendor = CPUID_VENDOR_INTEL,
1959 .family = 6,
1960 .model = 58,
1961 .stepping = 9,
1962 .features[FEAT_1_EDX] =
1963 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1964 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1965 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1966 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1967 CPUID_DE | CPUID_FP87,
1968 .features[FEAT_1_ECX] =
1969 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1970 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1971 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1972 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1973 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1974 .features[FEAT_7_0_EBX] =
1975 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1976 CPUID_7_0_EBX_ERMS,
1977 .features[FEAT_8000_0001_EDX] =
1978 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1979 CPUID_EXT2_SYSCALL,
1980 .features[FEAT_8000_0001_ECX] =
1981 CPUID_EXT3_LAHF_LM,
1982 .features[FEAT_XSAVE] =
1983 CPUID_XSAVE_XSAVEOPT,
1984 .features[FEAT_6_EAX] =
1985 CPUID_6_EAX_ARAT,
1986 .xlevel = 0x80000008,
1987 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1988 },
1989 {
1990 .name = "IvyBridge-IBRS",
1991 .level = 0xd,
1992 .vendor = CPUID_VENDOR_INTEL,
1993 .family = 6,
1994 .model = 58,
1995 .stepping = 9,
1996 .features[FEAT_1_EDX] =
1997 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1998 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1999 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2000 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2001 CPUID_DE | CPUID_FP87,
2002 .features[FEAT_1_ECX] =
2003 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2004 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
2005 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2006 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2007 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2008 .features[FEAT_7_0_EBX] =
2009 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
2010 CPUID_7_0_EBX_ERMS,
2011 .features[FEAT_8000_0001_EDX] =
2012 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2013 CPUID_EXT2_SYSCALL,
2014 .features[FEAT_8000_0001_ECX] =
2015 CPUID_EXT3_LAHF_LM,
2016 .features[FEAT_7_0_EDX] =
2017 CPUID_7_0_EDX_SPEC_CTRL,
2018 .features[FEAT_XSAVE] =
2019 CPUID_XSAVE_XSAVEOPT,
2020 .features[FEAT_6_EAX] =
2021 CPUID_6_EAX_ARAT,
2022 .xlevel = 0x80000008,
2023 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
2024 },
2025 {
2026 .name = "Haswell-noTSX",
2027 .level = 0xd,
2028 .vendor = CPUID_VENDOR_INTEL,
2029 .family = 6,
2030 .model = 60,
2031 .stepping = 1,
2032 .features[FEAT_1_EDX] =
2033 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2034 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2035 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2036 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2037 CPUID_DE | CPUID_FP87,
2038 .features[FEAT_1_ECX] =
2039 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2040 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2041 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2042 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2043 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2044 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2045 .features[FEAT_8000_0001_EDX] =
2046 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2047 CPUID_EXT2_SYSCALL,
2048 .features[FEAT_8000_0001_ECX] =
2049 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2050 .features[FEAT_7_0_EBX] =
2051 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2052 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2053 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
2054 .features[FEAT_XSAVE] =
2055 CPUID_XSAVE_XSAVEOPT,
2056 .features[FEAT_6_EAX] =
2057 CPUID_6_EAX_ARAT,
2058 .xlevel = 0x80000008,
2059 .model_id = "Intel Core Processor (Haswell, no TSX)",
2060 },
2061 {
2062 .name = "Haswell-noTSX-IBRS",
2063 .level = 0xd,
2064 .vendor = CPUID_VENDOR_INTEL,
2065 .family = 6,
2066 .model = 60,
2067 .stepping = 1,
2068 .features[FEAT_1_EDX] =
2069 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2070 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2071 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2072 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2073 CPUID_DE | CPUID_FP87,
2074 .features[FEAT_1_ECX] =
2075 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2076 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2077 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2078 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2079 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2080 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2081 .features[FEAT_8000_0001_EDX] =
2082 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2083 CPUID_EXT2_SYSCALL,
2084 .features[FEAT_8000_0001_ECX] =
2085 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2086 .features[FEAT_7_0_EDX] =
2087 CPUID_7_0_EDX_SPEC_CTRL,
2088 .features[FEAT_7_0_EBX] =
2089 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2090 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2091 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
2092 .features[FEAT_XSAVE] =
2093 CPUID_XSAVE_XSAVEOPT,
2094 .features[FEAT_6_EAX] =
2095 CPUID_6_EAX_ARAT,
2096 .xlevel = 0x80000008,
2097 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
2098 },
2099 {
2100 .name = "Haswell",
2101 .level = 0xd,
2102 .vendor = CPUID_VENDOR_INTEL,
2103 .family = 6,
2104 .model = 60,
2105 .stepping = 4,
2106 .features[FEAT_1_EDX] =
2107 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2108 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2109 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2110 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2111 CPUID_DE | CPUID_FP87,
2112 .features[FEAT_1_ECX] =
2113 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2114 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2115 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2116 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2117 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2118 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2119 .features[FEAT_8000_0001_EDX] =
2120 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2121 CPUID_EXT2_SYSCALL,
2122 .features[FEAT_8000_0001_ECX] =
2123 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2124 .features[FEAT_7_0_EBX] =
2125 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2126 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2127 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2128 CPUID_7_0_EBX_RTM,
2129 .features[FEAT_XSAVE] =
2130 CPUID_XSAVE_XSAVEOPT,
2131 .features[FEAT_6_EAX] =
2132 CPUID_6_EAX_ARAT,
2133 .xlevel = 0x80000008,
2134 .model_id = "Intel Core Processor (Haswell)",
2135 },
2136 {
2137 .name = "Haswell-IBRS",
2138 .level = 0xd,
2139 .vendor = CPUID_VENDOR_INTEL,
2140 .family = 6,
2141 .model = 60,
2142 .stepping = 4,
2143 .features[FEAT_1_EDX] =
2144 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2145 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2146 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2147 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2148 CPUID_DE | CPUID_FP87,
2149 .features[FEAT_1_ECX] =
2150 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2151 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2152 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2153 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2154 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2155 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2156 .features[FEAT_8000_0001_EDX] =
2157 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2158 CPUID_EXT2_SYSCALL,
2159 .features[FEAT_8000_0001_ECX] =
2160 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
2161 .features[FEAT_7_0_EDX] =
2162 CPUID_7_0_EDX_SPEC_CTRL,
2163 .features[FEAT_7_0_EBX] =
2164 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2165 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2166 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2167 CPUID_7_0_EBX_RTM,
2168 .features[FEAT_XSAVE] =
2169 CPUID_XSAVE_XSAVEOPT,
2170 .features[FEAT_6_EAX] =
2171 CPUID_6_EAX_ARAT,
2172 .xlevel = 0x80000008,
2173 .model_id = "Intel Core Processor (Haswell, IBRS)",
2174 },
2175 {
2176 .name = "Broadwell-noTSX",
2177 .level = 0xd,
2178 .vendor = CPUID_VENDOR_INTEL,
2179 .family = 6,
2180 .model = 61,
2181 .stepping = 2,
2182 .features[FEAT_1_EDX] =
2183 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2184 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2185 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2186 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2187 CPUID_DE | CPUID_FP87,
2188 .features[FEAT_1_ECX] =
2189 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2190 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2191 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2192 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2193 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2194 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2195 .features[FEAT_8000_0001_EDX] =
2196 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2197 CPUID_EXT2_SYSCALL,
2198 .features[FEAT_8000_0001_ECX] =
2199 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2200 .features[FEAT_7_0_EBX] =
2201 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2202 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2203 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2204 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2205 CPUID_7_0_EBX_SMAP,
2206 .features[FEAT_XSAVE] =
2207 CPUID_XSAVE_XSAVEOPT,
2208 .features[FEAT_6_EAX] =
2209 CPUID_6_EAX_ARAT,
2210 .xlevel = 0x80000008,
2211 .model_id = "Intel Core Processor (Broadwell, no TSX)",
2212 },
2213 {
2214 .name = "Broadwell-noTSX-IBRS",
2215 .level = 0xd,
2216 .vendor = CPUID_VENDOR_INTEL,
2217 .family = 6,
2218 .model = 61,
2219 .stepping = 2,
2220 .features[FEAT_1_EDX] =
2221 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2222 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2223 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2224 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2225 CPUID_DE | CPUID_FP87,
2226 .features[FEAT_1_ECX] =
2227 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2228 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2229 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2230 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2231 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2232 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2233 .features[FEAT_8000_0001_EDX] =
2234 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2235 CPUID_EXT2_SYSCALL,
2236 .features[FEAT_8000_0001_ECX] =
2237 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2238 .features[FEAT_7_0_EDX] =
2239 CPUID_7_0_EDX_SPEC_CTRL,
2240 .features[FEAT_7_0_EBX] =
2241 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2242 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2243 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2244 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2245 CPUID_7_0_EBX_SMAP,
2246 .features[FEAT_XSAVE] =
2247 CPUID_XSAVE_XSAVEOPT,
2248 .features[FEAT_6_EAX] =
2249 CPUID_6_EAX_ARAT,
2250 .xlevel = 0x80000008,
2251 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
2252 },
2253 {
2254 .name = "Broadwell",
2255 .level = 0xd,
2256 .vendor = CPUID_VENDOR_INTEL,
2257 .family = 6,
2258 .model = 61,
2259 .stepping = 2,
2260 .features[FEAT_1_EDX] =
2261 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2262 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2263 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2264 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2265 CPUID_DE | CPUID_FP87,
2266 .features[FEAT_1_ECX] =
2267 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2268 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2269 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2270 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2271 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2272 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2273 .features[FEAT_8000_0001_EDX] =
2274 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2275 CPUID_EXT2_SYSCALL,
2276 .features[FEAT_8000_0001_ECX] =
2277 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2278 .features[FEAT_7_0_EBX] =
2279 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2280 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2281 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2282 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2283 CPUID_7_0_EBX_SMAP,
2284 .features[FEAT_XSAVE] =
2285 CPUID_XSAVE_XSAVEOPT,
2286 .features[FEAT_6_EAX] =
2287 CPUID_6_EAX_ARAT,
2288 .xlevel = 0x80000008,
2289 .model_id = "Intel Core Processor (Broadwell)",
2290 },
2291 {
2292 .name = "Broadwell-IBRS",
2293 .level = 0xd,
2294 .vendor = CPUID_VENDOR_INTEL,
2295 .family = 6,
2296 .model = 61,
2297 .stepping = 2,
2298 .features[FEAT_1_EDX] =
2299 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2300 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2301 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2302 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2303 CPUID_DE | CPUID_FP87,
2304 .features[FEAT_1_ECX] =
2305 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2306 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2307 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2308 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2309 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2310 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2311 .features[FEAT_8000_0001_EDX] =
2312 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2313 CPUID_EXT2_SYSCALL,
2314 .features[FEAT_8000_0001_ECX] =
2315 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2316 .features[FEAT_7_0_EDX] =
2317 CPUID_7_0_EDX_SPEC_CTRL,
2318 .features[FEAT_7_0_EBX] =
2319 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2320 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2321 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2322 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2323 CPUID_7_0_EBX_SMAP,
2324 .features[FEAT_XSAVE] =
2325 CPUID_XSAVE_XSAVEOPT,
2326 .features[FEAT_6_EAX] =
2327 CPUID_6_EAX_ARAT,
2328 .xlevel = 0x80000008,
2329 .model_id = "Intel Core Processor (Broadwell, IBRS)",
2330 },
2331 {
2332 .name = "Skylake-Client",
2333 .level = 0xd,
2334 .vendor = CPUID_VENDOR_INTEL,
2335 .family = 6,
2336 .model = 94,
2337 .stepping = 3,
2338 .features[FEAT_1_EDX] =
2339 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2340 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2341 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2342 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2343 CPUID_DE | CPUID_FP87,
2344 .features[FEAT_1_ECX] =
2345 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2346 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2347 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2348 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2349 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2350 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2351 .features[FEAT_8000_0001_EDX] =
2352 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2353 CPUID_EXT2_SYSCALL,
2354 .features[FEAT_8000_0001_ECX] =
2355 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2356 .features[FEAT_7_0_EBX] =
2357 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2358 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2359 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2360 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2361 CPUID_7_0_EBX_SMAP,
2362 /* Missing: XSAVES (not supported by some Linux versions,
2363 * including v4.1 to v4.12).
2364 * KVM doesn't yet expose any XSAVES state save component,
2365 * and the only one defined in Skylake (processor tracing)
2366 * probably will block migration anyway.
2367 */
2368 .features[FEAT_XSAVE] =
2369 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2370 CPUID_XSAVE_XGETBV1,
2371 .features[FEAT_6_EAX] =
2372 CPUID_6_EAX_ARAT,
2373 .xlevel = 0x80000008,
2374 .model_id = "Intel Core Processor (Skylake)",
2375 },
2376 {
2377 .name = "Skylake-Client-IBRS",
2378 .level = 0xd,
2379 .vendor = CPUID_VENDOR_INTEL,
2380 .family = 6,
2381 .model = 94,
2382 .stepping = 3,
2383 .features[FEAT_1_EDX] =
2384 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2385 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2386 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2387 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2388 CPUID_DE | CPUID_FP87,
2389 .features[FEAT_1_ECX] =
2390 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2391 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2392 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2393 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2394 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2395 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2396 .features[FEAT_8000_0001_EDX] =
2397 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2398 CPUID_EXT2_SYSCALL,
2399 .features[FEAT_8000_0001_ECX] =
2400 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2401 .features[FEAT_7_0_EDX] =
2402 CPUID_7_0_EDX_SPEC_CTRL,
2403 .features[FEAT_7_0_EBX] =
2404 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2405 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2406 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2407 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2408 CPUID_7_0_EBX_SMAP,
2409 /* Missing: XSAVES (not supported by some Linux versions,
2410 * including v4.1 to v4.12).
2411 * KVM doesn't yet expose any XSAVES state save component,
2412 * and the only one defined in Skylake (processor tracing)
2413 * probably will block migration anyway.
2414 */
2415 .features[FEAT_XSAVE] =
2416 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2417 CPUID_XSAVE_XGETBV1,
2418 .features[FEAT_6_EAX] =
2419 CPUID_6_EAX_ARAT,
2420 .xlevel = 0x80000008,
2421 .model_id = "Intel Core Processor (Skylake, IBRS)",
2422 },
2423 {
2424 .name = "Skylake-Server",
2425 .level = 0xd,
2426 .vendor = CPUID_VENDOR_INTEL,
2427 .family = 6,
2428 .model = 85,
2429 .stepping = 4,
2430 .features[FEAT_1_EDX] =
2431 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2432 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2433 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2434 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2435 CPUID_DE | CPUID_FP87,
2436 .features[FEAT_1_ECX] =
2437 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2438 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2439 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2440 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2441 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2442 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2443 .features[FEAT_8000_0001_EDX] =
2444 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2445 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2446 .features[FEAT_8000_0001_ECX] =
2447 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2448 .features[FEAT_7_0_EBX] =
2449 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2450 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2451 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2452 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2453 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2454 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2455 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2456 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2457 .features[FEAT_7_0_ECX] =
2458 CPUID_7_0_ECX_PKU,
2459 /* Missing: XSAVES (not supported by some Linux versions,
2460 * including v4.1 to v4.12).
2461 * KVM doesn't yet expose any XSAVES state save component,
2462 * and the only one defined in Skylake (processor tracing)
2463 * probably will block migration anyway.
2464 */
2465 .features[FEAT_XSAVE] =
2466 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2467 CPUID_XSAVE_XGETBV1,
2468 .features[FEAT_6_EAX] =
2469 CPUID_6_EAX_ARAT,
2470 .xlevel = 0x80000008,
2471 .model_id = "Intel Xeon Processor (Skylake)",
2472 },
2473 {
2474 .name = "Skylake-Server-IBRS",
2475 .level = 0xd,
2476 .vendor = CPUID_VENDOR_INTEL,
2477 .family = 6,
2478 .model = 85,
2479 .stepping = 4,
2480 .features[FEAT_1_EDX] =
2481 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2482 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2483 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2484 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2485 CPUID_DE | CPUID_FP87,
2486 .features[FEAT_1_ECX] =
2487 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2488 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2489 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2490 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2491 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2492 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2493 .features[FEAT_8000_0001_EDX] =
2494 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2495 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2496 .features[FEAT_8000_0001_ECX] =
2497 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2498 .features[FEAT_7_0_EDX] =
2499 CPUID_7_0_EDX_SPEC_CTRL,
2500 .features[FEAT_7_0_EBX] =
2501 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2502 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2503 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2504 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2505 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2506 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2507 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2508 CPUID_7_0_EBX_AVX512VL,
2509 .features[FEAT_7_0_ECX] =
2510 CPUID_7_0_ECX_PKU,
2511 /* Missing: XSAVES (not supported by some Linux versions,
2512 * including v4.1 to v4.12).
2513 * KVM doesn't yet expose any XSAVES state save component,
2514 * and the only one defined in Skylake (processor tracing)
2515 * probably will block migration anyway.
2516 */
2517 .features[FEAT_XSAVE] =
2518 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2519 CPUID_XSAVE_XGETBV1,
2520 .features[FEAT_6_EAX] =
2521 CPUID_6_EAX_ARAT,
2522 .xlevel = 0x80000008,
2523 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2524 },
2525 {
2526 .name = "Cascadelake-Server",
2527 .level = 0xd,
2528 .vendor = CPUID_VENDOR_INTEL,
2529 .family = 6,
2530 .model = 85,
2531 .stepping = 6,
2532 .features[FEAT_1_EDX] =
2533 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2534 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2535 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2536 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2537 CPUID_DE | CPUID_FP87,
2538 .features[FEAT_1_ECX] =
2539 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2540 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2541 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2542 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2543 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2544 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2545 .features[FEAT_8000_0001_EDX] =
2546 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2547 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2548 .features[FEAT_8000_0001_ECX] =
2549 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2550 .features[FEAT_7_0_EBX] =
2551 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2552 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2553 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2554 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2555 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2556 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2557 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2558 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2559 .features[FEAT_7_0_ECX] =
2560 CPUID_7_0_ECX_PKU |
2561 CPUID_7_0_ECX_AVX512VNNI,
2562 .features[FEAT_7_0_EDX] =
2563 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2564 /* Missing: XSAVES (not supported by some Linux versions,
2565 * including v4.1 to v4.12).
2566 * KVM doesn't yet expose any XSAVES state save component,
2567 * and the only one defined in Skylake (processor tracing)
2568 * probably will block migration anyway.
2569 */
2570 .features[FEAT_XSAVE] =
2571 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2572 CPUID_XSAVE_XGETBV1,
2573 .features[FEAT_6_EAX] =
2574 CPUID_6_EAX_ARAT,
2575 .xlevel = 0x80000008,
2576 .model_id = "Intel Xeon Processor (Cascadelake)",
2577 },
2578 {
2579 .name = "Icelake-Client",
2580 .level = 0xd,
2581 .vendor = CPUID_VENDOR_INTEL,
2582 .family = 6,
2583 .model = 126,
2584 .stepping = 0,
2585 .features[FEAT_1_EDX] =
2586 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2587 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2588 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2589 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2590 CPUID_DE | CPUID_FP87,
2591 .features[FEAT_1_ECX] =
2592 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2593 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2594 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2595 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2596 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2597 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2598 .features[FEAT_8000_0001_EDX] =
2599 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2600 CPUID_EXT2_SYSCALL,
2601 .features[FEAT_8000_0001_ECX] =
2602 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2603 .features[FEAT_8000_0008_EBX] =
2604 CPUID_8000_0008_EBX_WBNOINVD,
2605 .features[FEAT_7_0_EBX] =
2606 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2607 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2608 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2609 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2610 CPUID_7_0_EBX_SMAP,
2611 .features[FEAT_7_0_ECX] =
2612 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2613 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2614 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2615 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2616 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2617 .features[FEAT_7_0_EDX] =
2618 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2619 /* Missing: XSAVES (not supported by some Linux versions,
2620 * including v4.1 to v4.12).
2621 * KVM doesn't yet expose any XSAVES state save component,
2622 * and the only one defined in Skylake (processor tracing)
2623 * probably will block migration anyway.
2624 */
2625 .features[FEAT_XSAVE] =
2626 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2627 CPUID_XSAVE_XGETBV1,
2628 .features[FEAT_6_EAX] =
2629 CPUID_6_EAX_ARAT,
2630 .xlevel = 0x80000008,
2631 .model_id = "Intel Core Processor (Icelake)",
2632 },
2633 {
2634 .name = "Icelake-Server",
2635 .level = 0xd,
2636 .vendor = CPUID_VENDOR_INTEL,
2637 .family = 6,
2638 .model = 134,
2639 .stepping = 0,
2640 .features[FEAT_1_EDX] =
2641 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2642 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2643 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2644 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2645 CPUID_DE | CPUID_FP87,
2646 .features[FEAT_1_ECX] =
2647 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2648 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2649 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2650 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2651 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2652 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2653 .features[FEAT_8000_0001_EDX] =
2654 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2655 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2656 .features[FEAT_8000_0001_ECX] =
2657 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2658 .features[FEAT_8000_0008_EBX] =
2659 CPUID_8000_0008_EBX_WBNOINVD,
2660 .features[FEAT_7_0_EBX] =
2661 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2662 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2663 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2664 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2665 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLWB |
2666 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2667 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2668 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2669 .features[FEAT_7_0_ECX] =
2670 CPUID_7_0_ECX_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU |
2671 CPUID_7_0_ECX_VBMI2 | CPUID_7_0_ECX_GFNI |
2672 CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ |
2673 CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG |
2674 CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57,
2675 .features[FEAT_7_0_EDX] =
2676 CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_SPEC_CTRL_SSBD,
2677 /* Missing: XSAVES (not supported by some Linux versions,
2678 * including v4.1 to v4.12).
2679 * KVM doesn't yet expose any XSAVES state save component,
2680 * and the only one defined in Skylake (processor tracing)
2681 * probably will block migration anyway.
2682 */
2683 .features[FEAT_XSAVE] =
2684 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2685 CPUID_XSAVE_XGETBV1,
2686 .features[FEAT_6_EAX] =
2687 CPUID_6_EAX_ARAT,
2688 .xlevel = 0x80000008,
2689 .model_id = "Intel Xeon Processor (Icelake)",
2690 },
2691 {
2692 .name = "KnightsMill",
2693 .level = 0xd,
2694 .vendor = CPUID_VENDOR_INTEL,
2695 .family = 6,
2696 .model = 133,
2697 .stepping = 0,
2698 .features[FEAT_1_EDX] =
2699 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2700 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2701 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2702 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2703 CPUID_PSE | CPUID_DE | CPUID_FP87,
2704 .features[FEAT_1_ECX] =
2705 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2706 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2707 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2708 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2709 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2710 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2711 .features[FEAT_8000_0001_EDX] =
2712 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2713 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2714 .features[FEAT_8000_0001_ECX] =
2715 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2716 .features[FEAT_7_0_EBX] =
2717 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2718 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2719 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2720 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2721 CPUID_7_0_EBX_AVX512ER,
2722 .features[FEAT_7_0_ECX] =
2723 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2724 .features[FEAT_7_0_EDX] =
2725 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2726 .features[FEAT_XSAVE] =
2727 CPUID_XSAVE_XSAVEOPT,
2728 .features[FEAT_6_EAX] =
2729 CPUID_6_EAX_ARAT,
2730 .xlevel = 0x80000008,
2731 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2732 },
2733 {
2734 .name = "Opteron_G1",
2735 .level = 5,
2736 .vendor = CPUID_VENDOR_AMD,
2737 .family = 15,
2738 .model = 6,
2739 .stepping = 1,
2740 .features[FEAT_1_EDX] =
2741 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2742 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2743 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2744 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2745 CPUID_DE | CPUID_FP87,
2746 .features[FEAT_1_ECX] =
2747 CPUID_EXT_SSE3,
2748 .features[FEAT_8000_0001_EDX] =
2749 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2750 .xlevel = 0x80000008,
2751 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2752 },
2753 {
2754 .name = "Opteron_G2",
2755 .level = 5,
2756 .vendor = CPUID_VENDOR_AMD,
2757 .family = 15,
2758 .model = 6,
2759 .stepping = 1,
2760 .features[FEAT_1_EDX] =
2761 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2762 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2763 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2764 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2765 CPUID_DE | CPUID_FP87,
2766 .features[FEAT_1_ECX] =
2767 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2768 .features[FEAT_8000_0001_EDX] =
2769 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2770 .features[FEAT_8000_0001_ECX] =
2771 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2772 .xlevel = 0x80000008,
2773 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2774 },
2775 {
2776 .name = "Opteron_G3",
2777 .level = 5,
2778 .vendor = CPUID_VENDOR_AMD,
2779 .family = 16,
2780 .model = 2,
2781 .stepping = 3,
2782 .features[FEAT_1_EDX] =
2783 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2784 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2785 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2786 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2787 CPUID_DE | CPUID_FP87,
2788 .features[FEAT_1_ECX] =
2789 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2790 CPUID_EXT_SSE3,
2791 .features[FEAT_8000_0001_EDX] =
2792 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL |
2793 CPUID_EXT2_RDTSCP,
2794 .features[FEAT_8000_0001_ECX] =
2795 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2796 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2797 .xlevel = 0x80000008,
2798 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2799 },
2800 {
2801 .name = "Opteron_G4",
2802 .level = 0xd,
2803 .vendor = CPUID_VENDOR_AMD,
2804 .family = 21,
2805 .model = 1,
2806 .stepping = 2,
2807 .features[FEAT_1_EDX] =
2808 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2809 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2810 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2811 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2812 CPUID_DE | CPUID_FP87,
2813 .features[FEAT_1_ECX] =
2814 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2815 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2816 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2817 CPUID_EXT_SSE3,
2818 .features[FEAT_8000_0001_EDX] =
2819 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2820 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2821 .features[FEAT_8000_0001_ECX] =
2822 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2823 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2824 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2825 CPUID_EXT3_LAHF_LM,
2826 .features[FEAT_SVM] =
2827 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2828 /* no xsaveopt! */
2829 .xlevel = 0x8000001A,
2830 .model_id = "AMD Opteron 62xx class CPU",
2831 },
2832 {
2833 .name = "Opteron_G5",
2834 .level = 0xd,
2835 .vendor = CPUID_VENDOR_AMD,
2836 .family = 21,
2837 .model = 2,
2838 .stepping = 0,
2839 .features[FEAT_1_EDX] =
2840 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2841 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2842 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2843 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2844 CPUID_DE | CPUID_FP87,
2845 .features[FEAT_1_ECX] =
2846 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2847 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2848 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2849 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2850 .features[FEAT_8000_0001_EDX] =
2851 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2852 CPUID_EXT2_SYSCALL | CPUID_EXT2_RDTSCP,
2853 .features[FEAT_8000_0001_ECX] =
2854 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2855 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2856 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2857 CPUID_EXT3_LAHF_LM,
2858 .features[FEAT_SVM] =
2859 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2860 /* no xsaveopt! */
2861 .xlevel = 0x8000001A,
2862 .model_id = "AMD Opteron 63xx class CPU",
2863 },
2864 {
2865 .name = "EPYC",
2866 .level = 0xd,
2867 .vendor = CPUID_VENDOR_AMD,
2868 .family = 23,
2869 .model = 1,
2870 .stepping = 2,
2871 .features[FEAT_1_EDX] =
2872 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2873 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2874 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2875 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2876 CPUID_VME | CPUID_FP87,
2877 .features[FEAT_1_ECX] =
2878 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2879 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2880 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2881 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2882 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2883 .features[FEAT_8000_0001_EDX] =
2884 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2885 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2886 CPUID_EXT2_SYSCALL,
2887 .features[FEAT_8000_0001_ECX] =
2888 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2889 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2890 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2891 CPUID_EXT3_TOPOEXT,
2892 .features[FEAT_7_0_EBX] =
2893 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2894 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2895 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2896 CPUID_7_0_EBX_SHA_NI,
2897 /* Missing: XSAVES (not supported by some Linux versions,
2898 * including v4.1 to v4.12).
2899 * KVM doesn't yet expose any XSAVES state save component.
2900 */
2901 .features[FEAT_XSAVE] =
2902 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2903 CPUID_XSAVE_XGETBV1,
2904 .features[FEAT_6_EAX] =
2905 CPUID_6_EAX_ARAT,
2906 .features[FEAT_SVM] =
2907 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2908 .xlevel = 0x8000001E,
2909 .model_id = "AMD EPYC Processor",
2910 .cache_info = &epyc_cache_info,
2911 },
2912 {
2913 .name = "EPYC-IBPB",
2914 .level = 0xd,
2915 .vendor = CPUID_VENDOR_AMD,
2916 .family = 23,
2917 .model = 1,
2918 .stepping = 2,
2919 .features[FEAT_1_EDX] =
2920 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2921 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2922 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2923 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2924 CPUID_VME | CPUID_FP87,
2925 .features[FEAT_1_ECX] =
2926 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2927 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2928 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2929 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2930 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2931 .features[FEAT_8000_0001_EDX] =
2932 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2933 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2934 CPUID_EXT2_SYSCALL,
2935 .features[FEAT_8000_0001_ECX] =
2936 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2937 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2938 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2939 CPUID_EXT3_TOPOEXT,
2940 .features[FEAT_8000_0008_EBX] =
2941 CPUID_8000_0008_EBX_IBPB,
2942 .features[FEAT_7_0_EBX] =
2943 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2944 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2945 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2946 CPUID_7_0_EBX_SHA_NI,
2947 /* Missing: XSAVES (not supported by some Linux versions,
2948 * including v4.1 to v4.12).
2949 * KVM doesn't yet expose any XSAVES state save component.
2950 */
2951 .features[FEAT_XSAVE] =
2952 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2953 CPUID_XSAVE_XGETBV1,
2954 .features[FEAT_6_EAX] =
2955 CPUID_6_EAX_ARAT,
2956 .features[FEAT_SVM] =
2957 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
2958 .xlevel = 0x8000001E,
2959 .model_id = "AMD EPYC Processor (with IBPB)",
2960 .cache_info = &epyc_cache_info,
2961 },
2962 {
2963 .name = "Dhyana",
2964 .level = 0xd,
2965 .vendor = CPUID_VENDOR_HYGON,
2966 .family = 24,
2967 .model = 0,
2968 .stepping = 1,
2969 .features[FEAT_1_EDX] =
2970 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2971 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2972 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2973 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2974 CPUID_VME | CPUID_FP87,
2975 .features[FEAT_1_ECX] =
2976 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2977 CPUID_EXT_XSAVE | CPUID_EXT_POPCNT |
2978 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2979 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2980 CPUID_EXT_MONITOR | CPUID_EXT_SSE3,
2981 .features[FEAT_8000_0001_EDX] =
2982 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2983 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2984 CPUID_EXT2_SYSCALL,
2985 .features[FEAT_8000_0001_ECX] =
2986 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2987 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2988 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM |
2989 CPUID_EXT3_TOPOEXT,
2990 .features[FEAT_8000_0008_EBX] =
2991 CPUID_8000_0008_EBX_IBPB,
2992 .features[FEAT_7_0_EBX] =
2993 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2994 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2995 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT,
2996 /*
2997 * Missing: XSAVES (not supported by some Linux versions,
2998 * including v4.1 to v4.12).
2999 * KVM doesn't yet expose any XSAVES state save component.
3000 */
3001 .features[FEAT_XSAVE] =
3002 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
3003 CPUID_XSAVE_XGETBV1,
3004 .features[FEAT_6_EAX] =
3005 CPUID_6_EAX_ARAT,
3006 .features[FEAT_SVM] =
3007 CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE,
3008 .xlevel = 0x8000001E,
3009 .model_id = "Hygon Dhyana Processor",
3010 .cache_info = &epyc_cache_info,
3011 },
3012 };
3013
3014 typedef struct PropValue {
3015 const char *prop, *value;
3016 } PropValue;
3017
3018 /* KVM-specific features that are automatically added/removed
3019 * from all CPU models when KVM is enabled.
3020 */
3021 static PropValue kvm_default_props[] = {
3022 { "kvmclock", "on" },
3023 { "kvm-nopiodelay", "on" },
3024 { "kvm-asyncpf", "on" },
3025 { "kvm-steal-time", "on" },
3026 { "kvm-pv-eoi", "on" },
3027 { "kvmclock-stable-bit", "on" },
3028 { "x2apic", "on" },
3029 { "acpi", "off" },
3030 { "monitor", "off" },
3031 { "svm", "off" },
3032 { NULL, NULL },
3033 };
3034
3035 /* TCG-specific defaults that override all CPU models when using TCG
3036 */
3037 static PropValue tcg_default_props[] = {
3038 { "vme", "off" },
3039 { NULL, NULL },
3040 };
3041
3042
3043 void x86_cpu_change_kvm_default(const char *prop, const char *value)
3044 {
3045 PropValue *pv;
3046 for (pv = kvm_default_props; pv->prop; pv++) {
3047 if (!strcmp(pv->prop, prop)) {
3048 pv->value = value;
3049 break;
3050 }
3051 }
3052
3053 /* It is valid to call this function only for properties that
3054 * are already present in the kvm_default_props table.
3055 */
3056 assert(pv->prop);
3057 }
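
/*
 * Minimal usage sketch (hypothetical caller, shown for illustration only):
 * board or compat code that wants a different KVM default can rewrite an
 * entry of kvm_default_props before CPUs are created.  The property name
 * must already be present in the table above, otherwise the assert() in
 * x86_cpu_change_kvm_default() fires.
 */
#if 0
static void example_compat_disable_x2apic(void)
{
    /* "x2apic" is listed in kvm_default_props, so this call is valid. */
    x86_cpu_change_kvm_default("x2apic", "off");
}
#endif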
3058
3059 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3060 bool migratable_only);
3061
3062 static bool lmce_supported(void)
3063 {
3064 uint64_t mce_cap = 0;
3065
3066 #ifdef CONFIG_KVM
3067 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
3068 return false;
3069 }
3070 #endif
3071
3072 return !!(mce_cap & MCG_LMCE_P);
3073 }
3074
3075 #define CPUID_MODEL_ID_SZ 48
3076
3077 /**
3078 * cpu_x86_fill_model_id:
3079 * Get CPUID model ID string from host CPU.
3080 *
3081 * @str should have at least CPUID_MODEL_ID_SZ bytes.
3082 *
3083 * The function does NOT add a null terminator to the string
3084 * automatically.
3085 */
3086 static int cpu_x86_fill_model_id(char *str)
3087 {
3088 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
3089 int i;
3090
3091 for (i = 0; i < 3; i++) {
3092 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
3093 memcpy(str + i * 16 + 0, &eax, 4);
3094 memcpy(str + i * 16 + 4, &ebx, 4);
3095 memcpy(str + i * 16 + 8, &ecx, 4);
3096 memcpy(str + i * 16 + 12, &edx, 4);
3097 }
3098 return 0;
3099 }
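
/*
 * Minimal usage sketch (hypothetical helper, for illustration only): the
 * function above writes exactly CPUID_MODEL_ID_SZ bytes and never adds a
 * NUL byte, so a caller that wants a C string must reserve one extra,
 * zero-initialized byte; max_x86_cpu_initfn() below uses that pattern.
 */
#if 0
static void example_print_host_model_id(void)
{
    char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };   /* last byte stays 0 */

    cpu_x86_fill_model_id(model_id);
    printf("host model-id: %s\n", model_id);
}
#endif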
3100
3101 static Property max_x86_cpu_properties[] = {
3102 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
3103 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
3104 DEFINE_PROP_END_OF_LIST()
3105 };
3106
3107 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
3108 {
3109 DeviceClass *dc = DEVICE_CLASS(oc);
3110 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3111
3112 xcc->ordering = 9;
3113
3114 xcc->model_description =
3115 "Enables all features supported by the accelerator in the current host";
3116
3117 dc->props = max_x86_cpu_properties;
3118 }
3119
3120 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
3121
3122 static void max_x86_cpu_initfn(Object *obj)
3123 {
3124 X86CPU *cpu = X86_CPU(obj);
3125 CPUX86State *env = &cpu->env;
3126 KVMState *s = kvm_state;
3127
3128 /* We can't fill the features array here because we don't know yet if
3129 * "migratable" is true or false.
3130 */
3131 cpu->max_features = true;
3132
3133 if (accel_uses_host_cpuid()) {
3134 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
3135 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
3136 int family, model, stepping;
3137
3138 host_vendor_fms(vendor, &family, &model, &stepping);
3139 cpu_x86_fill_model_id(model_id);
3140
3141 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
3142 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
3143 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
3144 object_property_set_int(OBJECT(cpu), stepping, "stepping",
3145 &error_abort);
3146 object_property_set_str(OBJECT(cpu), model_id, "model-id",
3147 &error_abort);
3148
3149 if (kvm_enabled()) {
3150 env->cpuid_min_level =
3151 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
3152 env->cpuid_min_xlevel =
3153 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
3154 env->cpuid_min_xlevel2 =
3155 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
3156 } else {
3157 env->cpuid_min_level =
3158 hvf_get_supported_cpuid(0x0, 0, R_EAX);
3159 env->cpuid_min_xlevel =
3160 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
3161 env->cpuid_min_xlevel2 =
3162 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
3163 }
3164
3165 if (lmce_supported()) {
3166 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
3167 }
3168 } else {
3169 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
3170 "vendor", &error_abort);
3171 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
3172 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
3173 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
3174 object_property_set_str(OBJECT(cpu),
3175 "QEMU TCG CPU version " QEMU_HW_VERSION,
3176 "model-id", &error_abort);
3177 }
3178
3179 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
3180 }
3181
3182 static const TypeInfo max_x86_cpu_type_info = {
3183 .name = X86_CPU_TYPE_NAME("max"),
3184 .parent = TYPE_X86_CPU,
3185 .instance_init = max_x86_cpu_initfn,
3186 .class_init = max_x86_cpu_class_init,
3187 };
3188
3189 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
3190 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
3191 {
3192 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3193
3194 xcc->host_cpuid_required = true;
3195 xcc->ordering = 8;
3196
3197 #if defined(CONFIG_KVM)
3198 xcc->model_description =
3199 "KVM processor with all supported host features ";
3200 #elif defined(CONFIG_HVF)
3201 xcc->model_description =
3202 "HVF processor with all supported host features ";
3203 #endif
3204 }
3205
3206 static const TypeInfo host_x86_cpu_type_info = {
3207 .name = X86_CPU_TYPE_NAME("host"),
3208 .parent = X86_CPU_TYPE_NAME("max"),
3209 .class_init = host_x86_cpu_class_init,
3210 };
3211
3212 #endif
3213
3214 static char *feature_word_description(FeatureWordInfo *f, uint32_t bit)
3215 {
3216 assert(f->type == CPUID_FEATURE_WORD || f->type == MSR_FEATURE_WORD);
3217
3218 switch (f->type) {
3219 case CPUID_FEATURE_WORD:
3220 {
3221 const char *reg = get_register_name_32(f->cpuid.reg);
3222 assert(reg);
3223 return g_strdup_printf("CPUID.%02XH:%s",
3224 f->cpuid.eax, reg);
3225 }
3226 case MSR_FEATURE_WORD:
3227 return g_strdup_printf("MSR(%02XH)",
3228 f->msr.index);
3229 }
3230
3231 return NULL;
3232 }
3233
3234 static void report_unavailable_features(FeatureWord w, uint32_t mask)
3235 {
3236 FeatureWordInfo *f = &feature_word_info[w];
3237 int i;
3238 char *feat_word_str;
3239
3240 for (i = 0; i < 32; ++i) {
3241 if ((1UL << i) & mask) {
3242 feat_word_str = feature_word_description(f, i);
3243 warn_report("%s doesn't support requested feature: %s%s%s [bit %d]",
3244 accel_uses_host_cpuid() ? "host" : "TCG",
3245 feat_word_str,
3246 f->feat_names[i] ? "." : "",
3247 f->feat_names[i] ? f->feat_names[i] : "", i);
3248 g_free(feat_word_str);
3249 }
3250 }
3251 }
3252
3253 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
3254 const char *name, void *opaque,
3255 Error **errp)
3256 {
3257 X86CPU *cpu = X86_CPU(obj);
3258 CPUX86State *env = &cpu->env;
3259 int64_t value;
3260
3261 value = (env->cpuid_version >> 8) & 0xf;
3262 if (value == 0xf) {
3263 value += (env->cpuid_version >> 20) & 0xff;
3264 }
3265 visit_type_int(v, name, &value, errp);
3266 }
3267
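/*
 * CPUID.01H:EAX encodes the family as a 4-bit base family (bits 11:8)
 * plus an 8-bit extended family (bits 27:20) that is only added when
 * the base family is 0xF.  For example, setting "family" to 0x17 stores
 * base family 0xF and extended family 0x08 (0xF + 0x08 = 0x17).
 */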
3268 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
3269 const char *name, void *opaque,
3270 Error **errp)
3271 {
3272 X86CPU *cpu = X86_CPU(obj);
3273 CPUX86State *env = &cpu->env;
3274 const int64_t min = 0;
3275 const int64_t max = 0xff + 0xf;
3276 Error *local_err = NULL;
3277 int64_t value;
3278
3279 visit_type_int(v, name, &value, &local_err);
3280 if (local_err) {
3281 error_propagate(errp, local_err);
3282 return;
3283 }
3284 if (value < min || value > max) {
3285 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3286 name ? name : "null", value, min, max);
3287 return;
3288 }
3289
3290 env->cpuid_version &= ~0xff00f00;
3291 if (value > 0x0f) {
3292 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
3293 } else {
3294 env->cpuid_version |= value << 8;
3295 }
3296 }
3297
3298 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
3299 const char *name, void *opaque,
3300 Error **errp)
3301 {
3302 X86CPU *cpu = X86_CPU(obj);
3303 CPUX86State *env = &cpu->env;
3304 int64_t value;
3305
3306 value = (env->cpuid_version >> 4) & 0xf;
3307 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
3308 visit_type_int(v, name, &value, errp);
3309 }
3310
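/*
 * CPUID.01H:EAX encodes the model as a 4-bit base model (bits 7:4)
 * plus a 4-bit extended model (bits 19:16).  For example, setting
 * "model" to 0x4E stores base model 0xE and extended model 0x4.
 */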
3311 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
3312 const char *name, void *opaque,
3313 Error **errp)
3314 {
3315 X86CPU *cpu = X86_CPU(obj);
3316 CPUX86State *env = &cpu->env;
3317 const int64_t min = 0;
3318 const int64_t max = 0xff;
3319 Error *local_err = NULL;
3320 int64_t value;
3321
3322 visit_type_int(v, name, &value, &local_err);
3323 if (local_err) {
3324 error_propagate(errp, local_err);
3325 return;
3326 }
3327 if (value < min || value > max) {
3328 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3329 name ? name : "null", value, min, max);
3330 return;
3331 }
3332
3333 env->cpuid_version &= ~0xf00f0;
3334 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
3335 }
3336
3337 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
3338 const char *name, void *opaque,
3339 Error **errp)
3340 {
3341 X86CPU *cpu = X86_CPU(obj);
3342 CPUX86State *env = &cpu->env;
3343 int64_t value;
3344
3345 value = env->cpuid_version & 0xf;
3346 visit_type_int(v, name, &value, errp);
3347 }
3348
3349 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
3350 const char *name, void *opaque,
3351 Error **errp)
3352 {
3353 X86CPU *cpu = X86_CPU(obj);
3354 CPUX86State *env = &cpu->env;
3355 const int64_t min = 0;
3356 const int64_t max = 0xf;
3357 Error *local_err = NULL;
3358 int64_t value;
3359
3360 visit_type_int(v, name, &value, &local_err);
3361 if (local_err) {
3362 error_propagate(errp, local_err);
3363 return;
3364 }
3365 if (value < min || value > max) {
3366 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3367 name ? name : "null", value, min, max);
3368 return;
3369 }
3370
3371 env->cpuid_version &= ~0xf;
3372 env->cpuid_version |= value & 0xf;
3373 }
3374
3375 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
3376 {
3377 X86CPU *cpu = X86_CPU(obj);
3378 CPUX86State *env = &cpu->env;
3379 char *value;
3380
3381 value = g_malloc(CPUID_VENDOR_SZ + 1);
3382 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
3383 env->cpuid_vendor3);
3384 return value;
3385 }
3386
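/*
 * The 12-character vendor string is packed little-endian into the three
 * vendor registers.  For example, "GenuineIntel" is stored as
 * EBX=0x756E6547 ("Genu"), EDX=0x49656E69 ("ineI"), ECX=0x6C65746E ("ntel").
 */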
3387 static void x86_cpuid_set_vendor(Object *obj, const char *value,
3388 Error **errp)
3389 {
3390 X86CPU *cpu = X86_CPU(obj);
3391 CPUX86State *env = &cpu->env;
3392 int i;
3393
3394 if (strlen(value) != CPUID_VENDOR_SZ) {
3395 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
3396 return;
3397 }
3398
3399 env->cpuid_vendor1 = 0;
3400 env->cpuid_vendor2 = 0;
3401 env->cpuid_vendor3 = 0;
3402 for (i = 0; i < 4; i++) {
3403 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
3404 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
3405 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
3406 }
3407 }
3408
3409 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
3410 {
3411 X86CPU *cpu = X86_CPU(obj);
3412 CPUX86State *env = &cpu->env;
3413 char *value;
3414 int i;
3415
3416 value = g_malloc(48 + 1);
3417 for (i = 0; i < 48; i++) {
3418 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
3419 }
3420 value[48] = '\0';
3421 return value;
3422 }
3423
3424 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
3425 Error **errp)
3426 {
3427 X86CPU *cpu = X86_CPU(obj);
3428 CPUX86State *env = &cpu->env;
3429 int c, len, i;
3430
3431 if (model_id == NULL) {
3432 model_id = "";
3433 }
3434 len = strlen(model_id);
3435 memset(env->cpuid_model, 0, 48);
3436 for (i = 0; i < 48; i++) {
3437 if (i >= len) {
3438 c = '\0';
3439 } else {
3440 c = (uint8_t)model_id[i];
3441 }
3442 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
3443 }
3444 }
3445
3446 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
3447 void *opaque, Error **errp)
3448 {
3449 X86CPU *cpu = X86_CPU(obj);
3450 int64_t value;
3451
3452 value = cpu->env.tsc_khz * 1000;
3453 visit_type_int(v, name, &value, errp);
3454 }
3455
3456 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
3457 void *opaque, Error **errp)
3458 {
3459 X86CPU *cpu = X86_CPU(obj);
3460 const int64_t min = 0;
3461 const int64_t max = INT64_MAX;
3462 Error *local_err = NULL;
3463 int64_t value;
3464
3465 visit_type_int(v, name, &value, &local_err);
3466 if (local_err) {
3467 error_propagate(errp, local_err);
3468 return;
3469 }
3470 if (value < min || value > max) {
3471 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
3472 name ? name : "null", value, min, max);
3473 return;
3474 }
3475
3476 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
3477 }
3478
3479 /* Generic getter for "feature-words" and "filtered-features" properties */
3480 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
3481 const char *name, void *opaque,
3482 Error **errp)
3483 {
3484 uint32_t *array = (uint32_t *)opaque;
3485 FeatureWord w;
3486 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
3487 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
3488 X86CPUFeatureWordInfoList *list = NULL;
3489
3490 for (w = 0; w < FEATURE_WORDS; w++) {
3491 FeatureWordInfo *wi = &feature_word_info[w];
3492 /*
3493 * We didn't have MSR features when "feature-words" was
3494 * introduced, so skip any feature word that isn't a CPUID one.
3495 */
3496 if (wi->type != CPUID_FEATURE_WORD) {
3497 continue;
3498 }
3499 X86CPUFeatureWordInfo *qwi = &word_infos[w];
3500 qwi->cpuid_input_eax = wi->cpuid.eax;
3501 qwi->has_cpuid_input_ecx = wi->cpuid.needs_ecx;
3502 qwi->cpuid_input_ecx = wi->cpuid.ecx;
3503 qwi->cpuid_register = x86_reg_info_32[wi->cpuid.reg].qapi_enum;
3504 qwi->features = array[w];
3505
3506 /* List will be in reverse order, but order shouldn't matter */
3507 list_entries[w].next = list;
3508 list_entries[w].value = &word_infos[w];
3509 list = &list_entries[w];
3510 }
3511
3512 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
3513 }
3514
3515 /* Convert all '_' in a feature string option name to '-', to make feature
3516 * name conform to QOM property naming rule, which uses '-' instead of '_'.
3517 */
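/* For example, a user-supplied "tsc_adjust" becomes "tsc-adjust". */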
3518 static inline void feat2prop(char *s)
3519 {
3520 while ((s = strchr(s, '_'))) {
3521 *s = '-';
3522 }
3523 }
3524
3525 /* Return the feature property name for a feature flag bit */
3526 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
3527 {
3528 /* XSAVE components are automatically enabled by other features,
3529 * so return the original feature name instead
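 * (e.g. XSAVE component 2, the YMM state, is reported as "avx")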
3530 */
3531 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
3532 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
3533
3534 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
3535 x86_ext_save_areas[comp].bits) {
3536 w = x86_ext_save_areas[comp].feature;
3537 bitnr = ctz32(x86_ext_save_areas[comp].bits);
3538 }
3539 }
3540
3541 assert(bitnr < 32);
3542 assert(w < FEATURE_WORDS);
3543 return feature_word_info[w].feat_names[bitnr];
3544 }
3545
3546 /* Compatibility hack to maintain the legacy +-feat semantics,
3547 * where +-feat overwrites any feature set by
3548 * feat=on|feat even if the latter is parsed after +-feat
3549 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
3550 */
3551 static GList *plus_features, *minus_features;
3552
3553 static gint compare_string(gconstpointer a, gconstpointer b)
3554 {
3555 return g_strcmp0(a, b);
3556 }
3557
3558 /* Parse "+feature,-feature,feature=foo" CPU feature string
3559 */
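/*
 * For example, "-cpu Haswell,+avx2,-vmx,tsc-freq=2.5G" queues avx2 to be
 * forced on and vmx to be forced off (both applied later by
 * x86_cpu_expand_features()), and registers a global "tsc-frequency"
 * property with the value 2500000000.
 */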
3560 static void x86_cpu_parse_featurestr(const char *typename, char *features,
3561 Error **errp)
3562 {
3563 char *featurestr; /* Single "key=value" string being parsed */
3564 static bool cpu_globals_initialized;
3565 bool ambiguous = false;
3566
3567 if (cpu_globals_initialized) {
3568 return;
3569 }
3570 cpu_globals_initialized = true;
3571
3572 if (!features) {
3573 return;
3574 }
3575
3576 for (featurestr = strtok(features, ",");
3577 featurestr;
3578 featurestr = strtok(NULL, ",")) {
3579 const char *name;
3580 const char *val = NULL;
3581 char *eq = NULL;
3582 char num[32];
3583 GlobalProperty *prop;
3584
3585 /* Compatibility syntax: */
3586 if (featurestr[0] == '+') {
3587 plus_features = g_list_append(plus_features,
3588 g_strdup(featurestr + 1));
3589 continue;
3590 } else if (featurestr[0] == '-') {
3591 minus_features = g_list_append(minus_features,
3592 g_strdup(featurestr + 1));
3593 continue;
3594 }
3595
3596 eq = strchr(featurestr, '=');
3597 if (eq) {
3598 *eq++ = 0;
3599 val = eq;
3600 } else {
3601 val = "on";
3602 }
3603
3604 feat2prop(featurestr);
3605 name = featurestr;
3606
3607 if (g_list_find_custom(plus_features, name, compare_string)) {
3608 warn_report("Ambiguous CPU model string. "
3609 "Don't mix both \"+%s\" and \"%s=%s\"",
3610 name, name, val);
3611 ambiguous = true;
3612 }
3613 if (g_list_find_custom(minus_features, name, compare_string)) {
3614 warn_report("Ambiguous CPU model string. "
3615 "Don't mix both \"-%s\" and \"%s=%s\"",
3616 name, name, val);
3617 ambiguous = true;
3618 }
3619
3620 /* Special case: */
3621 if (!strcmp(name, "tsc-freq")) {
3622 int ret;
3623 uint64_t tsc_freq;
3624
3625 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
3626 if (ret < 0 || tsc_freq > INT64_MAX) {
3627 error_setg(errp, "bad numerical value %s", val);
3628 return;
3629 }
3630 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
3631 val = num;
3632 name = "tsc-frequency";
3633 }
3634
3635 prop = g_new0(typeof(*prop), 1);
3636 prop->driver = typename;
3637 prop->property = g_strdup(name);
3638 prop->value = g_strdup(val);
3639 qdev_prop_register_global(prop);
3640 }
3641
3642 if (ambiguous) {
3643 warn_report("Compatibility of ambiguous CPU model "
3644 "strings won't be kept on future QEMU versions");
3645 }
3646 }
3647
3648 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3649 static int x86_cpu_filter_features(X86CPU *cpu);
3650
3651 /* Build a list with the name of all features on a feature word array */
3652 static void x86_cpu_list_feature_names(FeatureWordArray features,
3653 strList **feat_names)
3654 {
3655 FeatureWord w;
3656 strList **next = feat_names;
3657
3658 for (w = 0; w < FEATURE_WORDS; w++) {
3659 uint32_t filtered = features[w];
3660 int i;
3661 for (i = 0; i < 32; i++) {
3662 if (filtered & (1UL << i)) {
3663 strList *new = g_new0(strList, 1);
3664 new->value = g_strdup(x86_cpu_feature_name(w, i));
3665 *next = new;
3666 next = &new->next;
3667 }
3668 }
3669 }
3670 }
3671
3672 static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v,
3673 const char *name, void *opaque,
3674 Error **errp)
3675 {
3676 X86CPU *xc = X86_CPU(obj);
3677 strList *result = NULL;
3678
3679 x86_cpu_list_feature_names(xc->filtered_features, &result);
3680 visit_type_strList(v, "unavailable-features", &result, errp);
3681 }
3682
3683 /* Check for missing features that may prevent the CPU class from
3684 * running on the current machine and accelerator.
3685 */
3686 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3687 strList **missing_feats)
3688 {
3689 X86CPU *xc;
3690 Error *err = NULL;
3691 strList **next = missing_feats;
3692
3693 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3694 strList *new = g_new0(strList, 1);
3695 new->value = g_strdup("kvm");
3696 *missing_feats = new;
3697 return;
3698 }
3699
3700 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3701
3702 x86_cpu_expand_features(xc, &err);
3703 if (err) {
3704 /* Errors at x86_cpu_expand_features should never happen,
3705 * but in case they do, just report the model as not
3706 * runnable at all using the "type" property.
3707 */
3708 strList *new = g_new0(strList, 1);
3709 new->value = g_strdup("type");
3710 *next = new;
3711 next = &new->next;
3712 }
3713
3714 x86_cpu_filter_features(xc);
3715
3716 x86_cpu_list_feature_names(xc->filtered_features, next);
3717
3718 object_unref(OBJECT(xc));
3719 }
3720
3721 /* Print all CPUID feature names in the given list, wrapped at ~75 columns
3722 */
3723 static void listflags(GList *features)
3724 {
3725 size_t len = 0;
3726 GList *tmp;
3727
3728 for (tmp = features; tmp; tmp = tmp->next) {
3729 const char *name = tmp->data;
3730 if ((len + strlen(name) + 1) >= 75) {
3731 qemu_printf("\n");
3732 len = 0;
3733 }
3734 qemu_printf("%s%s", len == 0 ? " " : " ", name);
3735 len += strlen(name) + 1;
3736 }
3737 qemu_printf("\n");
3738 }
3739
3740 /* Sort by X86CPUClass::ordering first, then alphabetically by model name. */
3741 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3742 {
3743 ObjectClass *class_a = (ObjectClass *)a;
3744 ObjectClass *class_b = (ObjectClass *)b;
3745 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3746 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3747 char *name_a, *name_b;
3748 int ret;
3749
3750 if (cc_a->ordering != cc_b->ordering) {
3751 ret = cc_a->ordering - cc_b->ordering;
3752 } else {
3753 name_a = x86_cpu_class_get_model_name(cc_a);
3754 name_b = x86_cpu_class_get_model_name(cc_b);
3755 ret = strcmp(name_a, name_b);
3756 g_free(name_a);
3757 g_free(name_b);
3758 }
3759 return ret;
3760 }
3761
3762 static GSList *get_sorted_cpu_model_list(void)
3763 {
3764 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3765 list = g_slist_sort(list, x86_cpu_list_compare);
3766 return list;
3767 }
3768
3769 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3770 {
3771 ObjectClass *oc = data;
3772 X86CPUClass *cc = X86_CPU_CLASS(oc);
3773 char *name = x86_cpu_class_get_model_name(cc);
3774 const char *desc = cc->model_description;
3775 if (!desc && cc->cpu_def) {
3776 desc = cc->cpu_def->model_id;
3777 }
3778
3779 qemu_printf("x86 %-20s %-48s\n", name, desc);
3780 g_free(name);
3781 }
3782
3783 /* list available CPU models and flags */
3784 void x86_cpu_list(void)
3785 {
3786 int i, j;
3787 GSList *list;
3788 GList *names = NULL;
3789
3790 qemu_printf("Available CPUs:\n");
3791 list = get_sorted_cpu_model_list();
3792 g_slist_foreach(list, x86_cpu_list_entry, NULL);
3793 g_slist_free(list);
3794
3795 names = NULL;
3796 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3797 FeatureWordInfo *fw = &feature_word_info[i];
3798 for (j = 0; j < 32; j++) {
3799 if (fw->feat_names[j]) {
3800 names = g_list_append(names, (gpointer)fw->feat_names[j]);
3801 }
3802 }
3803 }
3804
3805 names = g_list_sort(names, (GCompareFunc)strcmp);
3806
3807 qemu_printf("\nRecognized CPUID flags:\n");
3808 listflags(names);
3809 qemu_printf("\n");
3810 g_list_free(names);
3811 }
3812
3813 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3814 {
3815 ObjectClass *oc = data;
3816 X86CPUClass *cc = X86_CPU_CLASS(oc);
3817 CpuDefinitionInfoList **cpu_list = user_data;
3818 CpuDefinitionInfoList *entry;
3819 CpuDefinitionInfo *info;
3820
3821 info = g_malloc0(sizeof(*info));
3822 info->name = x86_cpu_class_get_model_name(cc);
3823 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3824 info->has_unavailable_features = true;
3825 info->q_typename = g_strdup(object_class_get_name(oc));
3826 info->migration_safe = cc->migration_safe;
3827 info->has_migration_safe = true;
3828 info->q_static = cc->static_model;
3829
3830 entry = g_malloc0(sizeof(*entry));
3831 entry->value = info;
3832 entry->next = *cpu_list;
3833 *cpu_list = entry;
3834 }
3835
3836 CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
3837 {
3838 CpuDefinitionInfoList *cpu_list = NULL;
3839 GSList *list = get_sorted_cpu_model_list();
3840 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3841 g_slist_free(list);
3842 return cpu_list;
3843 }
3844
3845 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3846 bool migratable_only)
3847 {
3848 FeatureWordInfo *wi = &feature_word_info[w];
3849 uint32_t r = 0;
3850
3851 if (kvm_enabled()) {
3852 switch (wi->type) {
3853 case CPUID_FEATURE_WORD:
3854 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid.eax,
3855 wi->cpuid.ecx,
3856 wi->cpuid.reg);
3857 break;
3858 case MSR_FEATURE_WORD:
3859 r = kvm_arch_get_supported_msr_feature(kvm_state,
3860 wi->msr.index);
3861 break;
3862 }
3863 } else if (hvf_enabled()) {
3864 if (wi->type != CPUID_FEATURE_WORD) {
3865 return 0;
3866 }
3867 r = hvf_get_supported_cpuid(wi->cpuid.eax,
3868 wi->cpuid.ecx,
3869 wi->cpuid.reg);
3870 } else if (tcg_enabled()) {
3871 r = wi->tcg_features;
3872 } else {
3873 return ~0;
3874 }
3875 if (migratable_only) {
3876 r &= x86_cpu_get_migratable_flags(w);
3877 }
3878 return r;
3879 }
3880
3881 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3882 {
3883 FeatureWord w;
3884
3885 for (w = 0; w < FEATURE_WORDS; w++) {
3886 report_unavailable_features(w, cpu->filtered_features[w]);
3887 }
3888 }
3889
3890 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3891 {
3892 PropValue *pv;
3893 for (pv = props; pv->prop; pv++) {
3894 if (!pv->value) {
3895 continue;
3896 }
3897 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3898 &error_abort);
3899 }
3900 }
3901
3902 /* Load data from an X86CPUDefinition into an X86CPU object
3903 */
3904 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
3905 {
3906 CPUX86State *env = &cpu->env;
3907 const char *vendor;
3908 char host_vendor[CPUID_VENDOR_SZ + 1];
3909 FeatureWord w;
3910
3911 /* NOTE: any property set by this function should be returned by
3912 * x86_cpu_static_props(), so static expansion of
3913 * query-cpu-model-expansion is always complete.
3914 */
3915
3916 /* CPU models only set _minimum_ values for level/xlevel: */
3917 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
3918 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
3919
3920 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
3921 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
3922 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
3923 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
3924 for (w = 0; w < FEATURE_WORDS; w++) {
3925 env->features[w] = def->features[w];
3926 }
3927
3928 /* legacy-cache defaults to 'off' if CPU model provides cache info */
3929 cpu->legacy_cache = !def->cache_info;
3930
3931 /* Special cases not set in the X86CPUDefinition structs: */
3932 /* TODO: in-kernel irqchip for hvf */
3933 if (kvm_enabled()) {
3934 if (!kvm_irqchip_in_kernel()) {
3935 x86_cpu_change_kvm_default("x2apic", "off");
3936 }
3937
3938 x86_cpu_apply_props(cpu, kvm_default_props);
3939 } else if (tcg_enabled()) {
3940 x86_cpu_apply_props(cpu, tcg_default_props);
3941 }
3942
3943 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
3944
3945 /* sysenter isn't supported in compatibility mode on AMD,
3946 * syscall isn't supported in compatibility mode on Intel.
3947 * Normally we advertise the actual CPU vendor, but you can
3948 * override this using the 'vendor' property if you want to use
3949 * KVM's sysenter/syscall emulation in compatibility mode and
3950 * when doing cross-vendor migration.
3951 */
3952 vendor = def->vendor;
3953 if (accel_uses_host_cpuid()) {
3954 uint32_t ebx = 0, ecx = 0, edx = 0;
3955 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
3956 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
3957 vendor = host_vendor;
3958 }
3959
3960 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
3961
3962 }
3963
3964 #ifndef CONFIG_USER_ONLY
3965 /* Return a QDict containing keys for all properties that can be included
3966 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3967 * must be included in the dictionary.
3968 */
3969 static QDict *x86_cpu_static_props(void)
3970 {
3971 FeatureWord w;
3972 int i;
3973 static const char *props[] = {
3974 "min-level",
3975 "min-xlevel",
3976 "family",
3977 "model",
3978 "stepping",
3979 "model-id",
3980 "vendor",
3981 "lmce",
3982 NULL,
3983 };
3984 static QDict *d;
3985
3986 if (d) {
3987 return d;
3988 }
3989
3990 d = qdict_new();
3991 for (i = 0; props[i]; i++) {
3992 qdict_put_null(d, props[i]);
3993 }
3994
3995 for (w = 0; w < FEATURE_WORDS; w++) {
3996 FeatureWordInfo *fi = &feature_word_info[w];
3997 int bit;
3998 for (bit = 0; bit < 32; bit++) {
3999 if (!fi->feat_names[bit]) {
4000 continue;
4001 }
4002 qdict_put_null(d, fi->feat_names[bit]);
4003 }
4004 }
4005
4006 return d;
4007 }
4008
4009 /* Add an entry to the @props dict with the current value of the given property. */
4010 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
4011 {
4012 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
4013 &error_abort);
4014
4015 qdict_put_obj(props, prop, value);
4016 }
4017
4018 /* Convert CPU model data from X86CPU object to a property dictionary
4019 * that can recreate exactly the same CPU model.
4020 */
4021 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
4022 {
4023 QDict *sprops = x86_cpu_static_props();
4024 const QDictEntry *e;
4025
4026 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
4027 const char *prop = qdict_entry_key(e);
4028 x86_cpu_expand_prop(cpu, props, prop);
4029 }
4030 }
4031
4032 /* Convert CPU model data from X86CPU object to a property dictionary
4033 * that can recreate exactly the same CPU model, including every
4034 * writeable QOM property.
4035 */
4036 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
4037 {
4038 ObjectPropertyIterator iter;
4039 ObjectProperty *prop;
4040
4041 object_property_iter_init(&iter, OBJECT(cpu));
4042 while ((prop = object_property_iter_next(&iter))) {
4043 /* skip read-only or write-only properties */
4044 if (!prop->get || !prop->set) {
4045 continue;
4046 }
4047
4048 /* "hotplugged" is the only property that is configurable
4049 * on the command-line but will be set differently on CPUs
4050 * created using "-cpu ... -smp ..." and by CPUs created
4051 * on the fly by x86_cpu_from_model() for querying. Skip it.
4052 */
4053 if (!strcmp(prop->name, "hotplugged")) {
4054 continue;
4055 }
4056 x86_cpu_expand_prop(cpu, props, prop->name);
4057 }
4058 }
4059
4060 static void object_apply_props(Object *obj, QDict *props, Error **errp)
4061 {
4062 const QDictEntry *prop;
4063 Error *err = NULL;
4064
4065 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
4066 object_property_set_qobject(obj, qdict_entry_value(prop),
4067 qdict_entry_key(prop), &err);
4068 if (err) {
4069 break;
4070 }
4071 }
4072
4073 error_propagate(errp, err);
4074 }
4075
4076 /* Create X86CPU object according to model+props specification */
4077 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
4078 {
4079 X86CPU *xc = NULL;
4080 X86CPUClass *xcc;
4081 Error *err = NULL;
4082
4083 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
4084 if (xcc == NULL) {
4085 error_setg(&err, "CPU model '%s' not found", model);
4086 goto out;
4087 }
4088
4089 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
4090 if (props) {
4091 object_apply_props(OBJECT(xc), props, &err);
4092 if (err) {
4093 goto out;
4094 }
4095 }
4096
4097 x86_cpu_expand_features(xc, &err);
4098 if (err) {
4099 goto out;
4100 }
4101
4102 out:
4103 if (err) {
4104 error_propagate(errp, err);
4105 object_unref(OBJECT(xc));
4106 xc = NULL;
4107 }
4108 return xc;
4109 }
4110
4111 CpuModelExpansionInfo *
4112 qmp_query_cpu_model_expansion(CpuModelExpansionType type,
4113 CpuModelInfo *model,
4114 Error **errp)
4115 {
4116 X86CPU *xc = NULL;
4117 Error *err = NULL;
4118 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
4119 QDict *props = NULL;
4120 const char *base_name;
4121
4122 xc = x86_cpu_from_model(model->name,
4123 model->has_props ?
4124 qobject_to(QDict, model->props) :
4125 NULL, &err);
4126 if (err) {
4127 goto out;
4128 }
4129
4130 props = qdict_new();
4131 ret->model = g_new0(CpuModelInfo, 1);
4132 ret->model->props = QOBJECT(props);
4133 ret->model->has_props = true;
4134
4135 switch (type) {
4136 case CPU_MODEL_EXPANSION_TYPE_STATIC:
4137 /* Static expansion will be based on "base" only */
4138 base_name = "base";
4139 x86_cpu_to_dict(xc, props);
4140 break;
4141 case CPU_MODEL_EXPANSION_TYPE_FULL:
4142 /* As we don't return every single property, full expansion needs
4143 * to keep the original model name+props, and add extra
4144 * properties on top of that.
4145 */
4146 base_name = model->name;
4147 x86_cpu_to_dict_full(xc, props);
4148 break;
4149 default:
4150 error_setg(&err, "Unsupported expansion type");
4151 goto out;
4152 }
4153
4154 x86_cpu_to_dict(xc, props);
4155
4156 ret->model->name = g_strdup(base_name);
4157
4158 out:
4159 object_unref(OBJECT(xc));
4160 if (err) {
4161 error_propagate(errp, err);
4162 qapi_free_CpuModelExpansionInfo(ret);
4163 ret = NULL;
4164 }
4165 return ret;
4166 }
4167 #endif /* !CONFIG_USER_ONLY */
4168
4169 static gchar *x86_gdb_arch_name(CPUState *cs)
4170 {
4171 #ifdef TARGET_X86_64
4172 return g_strdup("i386:x86-64");
4173 #else
4174 return g_strdup("i386");
4175 #endif
4176 }
4177
4178 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
4179 {
4180 X86CPUDefinition *cpudef = data;
4181 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4182
4183 xcc->cpu_def = cpudef;
4184 xcc->migration_safe = true;
4185 }
4186
4187 static void x86_register_cpudef_type(X86CPUDefinition *def)
4188 {
4189 char *typename = x86_cpu_type_name(def->name);
4190 TypeInfo ti = {
4191 .name = typename,
4192 .parent = TYPE_X86_CPU,
4193 .class_init = x86_cpu_cpudef_class_init,
4194 .class_data = def,
4195 };
4196
4197 /* AMD aliases are handled at runtime based on CPUID vendor, so
4198 * they shouldn't be set on the CPU model table.
4199 */
4200 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
4201 /* catch mistakes instead of silently truncating model_id when too long */
4202 assert(def->model_id && strlen(def->model_id) <= 48);
4203
4205 type_register(&ti);
4206 g_free(typename);
4207 }
4208
4209 #if !defined(CONFIG_USER_ONLY)
4210
4211 void cpu_clear_apic_feature(CPUX86State *env)
4212 {
4213 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
4214 }
4215
4216 #endif /* !CONFIG_USER_ONLY */
4217
4218 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
4219 uint32_t *eax, uint32_t *ebx,
4220 uint32_t *ecx, uint32_t *edx)
4221 {
4222 X86CPU *cpu = env_archcpu(env);
4223 CPUState *cs = env_cpu(env);
4224 uint32_t die_offset;
4225 uint32_t limit;
4226 uint32_t signature[3];
4227
4228 /* Calculate & apply limits for different index ranges */
4229 if (index >= 0xC0000000) {
4230 limit = env->cpuid_xlevel2;
4231 } else if (index >= 0x80000000) {
4232 limit = env->cpuid_xlevel;
4233 } else if (index >= 0x40000000) {
4234 limit = 0x40000001;
4235 } else {
4236 limit = env->cpuid_level;
4237 }
4238
4239 if (index > limit) {
4240 /* Intel documentation states that invalid EAX input will
4241 * return the same information as EAX=cpuid_level
4242 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
4243 */
4244 index = env->cpuid_level;
4245 }
4246
4247 switch(index) {
4248 case 0:
4249 *eax = env->cpuid_level;
4250 *ebx = env->cpuid_vendor1;
4251 *edx = env->cpuid_vendor2;
4252 *ecx = env->cpuid_vendor3;
4253 break;
4254 case 1:
4255 *eax = env->cpuid_version;
4256 *ebx = (cpu->apic_id << 24) |
4257 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
4258 *ecx = env->features[FEAT_1_ECX];
4259 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
4260 *ecx |= CPUID_EXT_OSXSAVE;
4261 }
4262 *edx = env->features[FEAT_1_EDX];
4263 if (cs->nr_cores * cs->nr_threads > 1) {
4264 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
4265 *edx |= CPUID_HT;
4266 }
4267 break;
4268 case 2:
4269 /* cache info: needed for Pentium Pro compatibility */
4270 if (cpu->cache_info_passthrough) {
4271 host_cpuid(index, 0, eax, ebx, ecx, edx);
4272 break;
4273 }
4274 *eax = 1; /* Number of CPUID[EAX=2] calls required */
4275 *ebx = 0;
4276 if (!cpu->enable_l3_cache) {
4277 *ecx = 0;
4278 } else {
4279 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
4280 }
4281 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
4282 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
4283 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
4284 break;
4285 case 4:
4286 /* cache info: needed for Core compatibility */
4287 if (cpu->cache_info_passthrough) {
4288 host_cpuid(index, count, eax, ebx, ecx, edx);
4289 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
4290 *eax &= ~0xFC000000;
4291 if ((*eax & 31) && cs->nr_cores > 1) {
4292 *eax |= (cs->nr_cores - 1) << 26;
4293 }
4294 } else {
4295 *eax = 0;
4296 switch (count) {
4297 case 0: /* L1 dcache info */
4298 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
4299 1, cs->nr_cores,
4300 eax, ebx, ecx, edx);
4301 break;
4302 case 1: /* L1 icache info */
4303 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
4304 1, cs->nr_cores,
4305 eax, ebx, ecx, edx);
4306 break;
4307 case 2: /* L2 cache info */
4308 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
4309 cs->nr_threads, cs->nr_cores,
4310 eax, ebx, ecx, edx);
4311 break;
4312 case 3: /* L3 cache info */
4313 die_offset = apicid_die_offset(env->nr_dies,
4314 cs->nr_cores, cs->nr_threads);
4315 if (cpu->enable_l3_cache) {
4316 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
4317 (1 << die_offset), cs->nr_cores,
4318 eax, ebx, ecx, edx);
4319 break;
4320 }
4321 /* fall through */
4322 default: /* end of info */
4323 *eax = *ebx = *ecx = *edx = 0;
4324 break;
4325 }
4326 }
4327 break;
4328 case 5:
4329 /* MONITOR/MWAIT Leaf */
4330 *eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
4331 *ebx = cpu->mwait.ebx; /* Largest monitor-line size in bytes */
4332 *ecx = cpu->mwait.ecx; /* flags */
4333 *edx = cpu->mwait.edx; /* mwait substates */
4334 break;
4335 case 6:
4336 /* Thermal and Power Leaf */
4337 *eax = env->features[FEAT_6_EAX];
4338 *ebx = 0;
4339 *ecx = 0;
4340 *edx = 0;
4341 break;
4342 case 7:
4343 /* Structured Extended Feature Flags Enumeration Leaf */
4344 if (count == 0) {
4345 *eax = 0; /* Maximum ECX value for sub-leaves */
4346 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
4347 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
4348 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
4349 *ecx |= CPUID_7_0_ECX_OSPKE;
4350 }
4351 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
4352 } else {
4353 *eax = 0;
4354 *ebx = 0;
4355 *ecx = 0;
4356 *edx = 0;
4357 }
4358 break;
4359 case 9:
4360 /* Direct Cache Access Information Leaf */
4361 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
4362 *ebx = 0;
4363 *ecx = 0;
4364 *edx = 0;
4365 break;
4366 case 0xA:
4367 /* Architectural Performance Monitoring Leaf */
4368 if (kvm_enabled() && cpu->enable_pmu) {
4369 KVMState *s = cs->kvm_state;
4370
4371 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
4372 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
4373 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
4374 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
4375 } else if (hvf_enabled() && cpu->enable_pmu) {
4376 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
4377 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
4378 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
4379 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
4380 } else {
4381 *eax = 0;
4382 *ebx = 0;
4383 *ecx = 0;
4384 *edx = 0;
4385 }
4386 break;
4387 case 0xB:
4388 /* Extended Topology Enumeration Leaf */
4389 if (!cpu->enable_cpuid_0xb) {
4390 *eax = *ebx = *ecx = *edx = 0;
4391 break;
4392 }
4393
4394 *ecx = count & 0xff;
4395 *edx = cpu->apic_id;
4396
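/*
 * For example, assuming a single die, with nr_threads=2 and nr_cores=4
 * the SMT sub-leaf reports a shift width of 1 and the core-level
 * sub-leaf a shift width of 3 (1 SMT bit + 2 core bits).
 */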
4397 switch (count) {
4398 case 0:
4399 *eax = apicid_core_offset(env->nr_dies,
4400 cs->nr_cores, cs->nr_threads);
4401 *ebx = cs->nr_threads;
4402 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
4403 break;
4404 case 1:
4405 *eax = apicid_pkg_offset(env->nr_dies,
4406 cs->nr_cores, cs->nr_threads);
4407 *ebx = cs->nr_cores * cs->nr_threads;
4408 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
4409 break;
4410 default:
4411 *eax = 0;
4412 *ebx = 0;
4413 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
4414 }
4415
4416 assert(!(*eax & ~0x1f));
4417 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
4418 break;
4419 case 0xD: {
4420 /* Processor Extended State */
4421 *eax = 0;
4422 *ebx = 0;
4423 *ecx = 0;
4424 *edx = 0;
4425 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4426 break;
4427 }
4428
4429 if (count == 0) {
4430 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
4431 *eax = env->features[FEAT_XSAVE_COMP_LO];
4432 *edx = env->features[FEAT_XSAVE_COMP_HI];
4433 *ebx = xsave_area_size(env->xcr0);
4434 } else if (count == 1) {
4435 *eax = env->features[FEAT_XSAVE];
4436 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
4437 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
4438 const ExtSaveArea *esa = &x86_ext_save_areas[count];
4439 *eax = esa->size;
4440 *ebx = esa->offset;
4441 }
4442 }
4443 break;
4444 }
4445 case 0x14: {
4446 /* Intel Processor Trace Enumeration */
4447 *eax = 0;
4448 *ebx = 0;
4449 *ecx = 0;
4450 *edx = 0;
4451 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
4452 !kvm_enabled()) {
4453 break;
4454 }
4455
4456 if (count == 0) {
4457 *eax = INTEL_PT_MAX_SUBLEAF;
4458 *ebx = INTEL_PT_MINIMAL_EBX;
4459 *ecx = INTEL_PT_MINIMAL_ECX;
4460 } else if (count == 1) {
4461 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
4462 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
4463 }
4464 break;
4465 }
4466 case 0x40000000:
4467 /*
4468 * The CPUID code in kvm_arch_init_vcpu() ignores the values
4469 * set here, but we restrict this leaf to TCG nonetheless.
4470 */
4471 if (tcg_enabled() && cpu->expose_tcg) {
4472 memcpy(signature, "TCGTCGTCGTCG", 12);
4473 *eax = 0x40000001;
4474 *ebx = signature[0];
4475 *ecx = signature[1];
4476 *edx = signature[2];
4477 } else {
4478 *eax = 0;
4479 *ebx = 0;
4480 *ecx = 0;
4481 *edx = 0;
4482 }
4483 break;
4484 case 0x40000001:
4485 *eax = 0;
4486 *ebx = 0;
4487 *ecx = 0;
4488 *edx = 0;
4489 break;
4490 case 0x80000000:
4491 *eax = env->cpuid_xlevel;
4492 *ebx = env->cpuid_vendor1;
4493 *edx = env->cpuid_vendor2;
4494 *ecx = env->cpuid_vendor3;
4495 break;
4496 case 0x80000001:
4497 *eax = env->cpuid_version;
4498 *ebx = 0;
4499 *ecx = env->features[FEAT_8000_0001_ECX];
4500 *edx = env->features[FEAT_8000_0001_EDX];
4501
4502 /* The Linux kernel checks for the CMPLegacy bit and
4503 * discards multiple thread information if it is set.
4504 * So don't set it here for Intel to make Linux guests happy.
4505 */
4506 if (cs->nr_cores * cs->nr_threads > 1) {
4507 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
4508 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
4509 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
4510 *ecx |= 1 << 1; /* CmpLegacy bit */
4511 }
4512 }
4513 break;
4514 case 0x80000002:
4515 case 0x80000003:
4516 case 0x80000004:
4517 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
4518 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
4519 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
4520 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
4521 break;
4522 case 0x80000005:
4523 /* cache info (L1 cache) */
4524 if (cpu->cache_info_passthrough) {
4525 host_cpuid(index, 0, eax, ebx, ecx, edx);
4526 break;
4527 }
4528 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
4529 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
4530 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
4531 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
4532 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
4533 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
4534 break;
4535 case 0x80000006:
4536 /* cache info (L2 cache) */
4537 if (cpu->cache_info_passthrough) {
4538 host_cpuid(index, 0, eax, ebx, ecx, edx);
4539 break;
4540 }
4541 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
4542 (L2_DTLB_2M_ENTRIES << 16) | \
4543 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
4544 (L2_ITLB_2M_ENTRIES);
4545 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
4546 (L2_DTLB_4K_ENTRIES << 16) | \
4547 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
4548 (L2_ITLB_4K_ENTRIES);
4549 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
4550 cpu->enable_l3_cache ?
4551 env->cache_info_amd.l3_cache : NULL,
4552 ecx, edx);
4553 break;
4554 case 0x80000007:
4555 *eax = 0;
4556 *ebx = 0;
4557 *ecx = 0;
4558 *edx = env->features[FEAT_8000_0007_EDX];
4559 break;
4560 case 0x80000008:
4561 /* virtual & phys address size in low 2 bytes. */
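/* e.g. EAX = 0x3028 advertises 48 virtual and 40 physical address bits */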
4562 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4563 /* 64 bit processor */
4564 *eax = cpu->phys_bits; /* configurable physical bits */
4565 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
4566 *eax |= 0x00003900; /* 57 bits virtual */
4567 } else {
4568 *eax |= 0x00003000; /* 48 bits virtual */
4569 }
4570 } else {
4571 *eax = cpu->phys_bits;
4572 }
4573 *ebx = env->features[FEAT_8000_0008_EBX];
4574 *ecx = 0;
4575 *edx = 0;
4576 if (cs->nr_cores * cs->nr_threads > 1) {
4577 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
4578 }
4579 break;
4580 case 0x8000000A:
4581 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4582 *eax = 0x00000001; /* SVM Revision */
4583 *ebx = 0x00000010; /* nr of ASIDs */
4584 *ecx = 0;
4585 *edx = env->features[FEAT_SVM]; /* optional features */
4586 } else {
4587 *eax = 0;
4588 *ebx = 0;
4589 *ecx = 0;
4590 *edx = 0;
4591 }
4592 break;
4593 case 0x8000001D:
4594 *eax = 0;
4595 if (cpu->cache_info_passthrough) {
4596 host_cpuid(index, count, eax, ebx, ecx, edx);
4597 break;
4598 }
4599 switch (count) {
4600 case 0: /* L1 dcache info */
4601 encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache, cs,
4602 eax, ebx, ecx, edx);
4603 break;
4604 case 1: /* L1 icache info */
4605 encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache, cs,
4606 eax, ebx, ecx, edx);
4607 break;
4608 case 2: /* L2 cache info */
4609 encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache, cs,
4610 eax, ebx, ecx, edx);
4611 break;
4612 case 3: /* L3 cache info */
4613 encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache, cs,
4614 eax, ebx, ecx, edx);
4615 break;
4616 default: /* end of info */
4617 *eax = *ebx = *ecx = *edx = 0;
4618 break;
4619 }
4620 break;
4621 case 0x8000001E:
4622 assert(cpu->core_id <= 255);
4623 encode_topo_cpuid8000001e(cs, cpu,
4624 eax, ebx, ecx, edx);
4625 break;
4626 case 0xC0000000:
4627 *eax = env->cpuid_xlevel2;
4628 *ebx = 0;
4629 *ecx = 0;
4630 *edx = 0;
4631 break;
4632 case 0xC0000001:
4633 /* Support for VIA CPU's CPUID instruction */
4634 *eax = env->cpuid_version;
4635 *ebx = 0;
4636 *ecx = 0;
4637 *edx = env->features[FEAT_C000_0001_EDX];
4638 break;
4639 case 0xC0000002:
4640 case 0xC0000003:
4641 case 0xC0000004:
4642 /* Reserved for future use; currently filled with zeroes */
4643 *eax = 0;
4644 *ebx = 0;
4645 *ecx = 0;
4646 *edx = 0;
4647 break;
4648 case 0x8000001F:
4649 *eax = sev_enabled() ? 0x2 : 0;
4650 *ebx = sev_get_cbit_position();
4651 *ebx |= sev_get_reduced_phys_bits() << 6;
4652 *ecx = 0;
4653 *edx = 0;
4654 break;
4655 default:
4656 /* reserved values: zero */
4657 *eax = 0;
4658 *ebx = 0;
4659 *ecx = 0;
4660 *edx = 0;
4661 break;
4662 }
4663 }
4664
4665 /* CPUClass::reset() */
4666 static void x86_cpu_reset(CPUState *s)
4667 {
4668 X86CPU *cpu = X86_CPU(s);
4669 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
4670 CPUX86State *env = &cpu->env;
4671 target_ulong cr4;
4672 uint64_t xcr0;
4673 int i;
4674
4675 xcc->parent_reset(s);
4676
4677 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
4678
4679 env->old_exception = -1;
4680
4681 /* init to reset state */
4682
4683 env->hflags2 |= HF2_GIF_MASK;
4684
4685 cpu_x86_update_cr0(env, 0x60000010);
4686 env->a20_mask = ~0x0;
4687 env->smbase = 0x30000;
4688 env->msr_smi_count = 0;
4689
4690 env->idt.limit = 0xffff;
4691 env->gdt.limit = 0xffff;
4692 env->ldt.limit = 0xffff;
4693 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
4694 env->tr.limit = 0xffff;
4695 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
4696
4697 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
4698 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
4699 DESC_R_MASK | DESC_A_MASK);
4700 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
4701 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4702 DESC_A_MASK);
4703 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
4704 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4705 DESC_A_MASK);
4706 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
4707 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4708 DESC_A_MASK);
4709 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
4710 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4711 DESC_A_MASK);
4712 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
4713 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4714 DESC_A_MASK);
4715
4716 env->eip = 0xfff0;
4717 env->regs[R_EDX] = env->cpuid_version;
4718
4719 env->eflags = 0x2;
4720
4721 /* FPU init */
4722 for (i = 0; i < 8; i++) {
4723 env->fptags[i] = 1;
4724 }
4725 cpu_set_fpuc(env, 0x37f);
4726
4727 env->mxcsr = 0x1f80;
4728 /* All units are in INIT state. */
4729 env->xstate_bv = 0;
4730
4731 env->pat = 0x0007040600070406ULL;
4732 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
4733 if (env->features[FEAT_1_ECX] & CPUID_EXT_MONITOR) {
4734 env->msr_ia32_misc_enable |= MSR_IA32_MISC_ENABLE_MWAIT;
4735 }
4736
4737 memset(env->dr, 0, sizeof(env->dr));
4738 env->dr[6] = DR6_FIXED_1;
4739 env->dr[7] = DR7_FIXED_1;
4740 cpu_breakpoint_remove_all(s, BP_CPU);
4741 cpu_watchpoint_remove_all(s, BP_CPU);
4742
4743 cr4 = 0;
4744 xcr0 = XSTATE_FP_MASK;
4745
4746 #ifdef CONFIG_USER_ONLY
4747 /* Enable all the features for user-mode. */
4748 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4749 xcr0 |= XSTATE_SSE_MASK;
4750 }
4751 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4752 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4753 if (env->features[esa->feature] & esa->bits) {
4754 xcr0 |= 1ull << i;
4755 }
4756 }
4757
4758 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
4759 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
4760 }
4761 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
4762 cr4 |= CR4_FSGSBASE_MASK;
4763 }
4764 #endif
4765
4766 env->xcr0 = xcr0;
4767 cpu_x86_update_cr4(env, cr4);
4768
4769 /*
4770 * SDM 11.11.5 requires:
4771 * - IA32_MTRR_DEF_TYPE MSR.E = 0
4772 * - IA32_MTRR_PHYSMASKn.V = 0
4773 * All other bits are undefined. For simplification, zero it all.
4774 */
4775 env->mtrr_deftype = 0;
4776 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
4777 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
4778
4779 env->interrupt_injected = -1;
4780 env->exception_nr = -1;
4781 env->exception_pending = 0;
4782 env->exception_injected = 0;
4783 env->exception_has_payload = false;
4784 env->exception_payload = 0;
4785 env->nmi_injected = false;
4786 #if !defined(CONFIG_USER_ONLY)
4787 /* We hard-wire the BSP to the first CPU. */
4788 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
4789
4790 s->halted = !cpu_is_bsp(cpu);
4791
4792 if (kvm_enabled()) {
4793 kvm_arch_reset_vcpu(cpu);
4794 }
4795 else if (hvf_enabled()) {
4796 hvf_reset_vcpu(s);
4797 }
4798 #endif
4799 }
4800
4801 #ifndef CONFIG_USER_ONLY
4802 bool cpu_is_bsp(X86CPU *cpu)
4803 {
4804 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4805 }
4806
4807 /* TODO: remove me, when reset over QOM tree is implemented */
4808 static void x86_cpu_machine_reset_cb(void *opaque)
4809 {
4810 X86CPU *cpu = opaque;
4811 cpu_reset(CPU(cpu));
4812 }
4813 #endif
4814
4815 static void mce_init(X86CPU *cpu)
4816 {
4817 CPUX86State *cenv = &cpu->env;
4818 unsigned int bank;
4819
4820 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4821 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4822 (CPUID_MCE | CPUID_MCA)) {
4823 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4824 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4825 cenv->mcg_ctl = ~(uint64_t)0;
4826 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4827 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4828 }
4829 }
4830 }
4831
4832 #ifndef CONFIG_USER_ONLY
4833 APICCommonClass *apic_get_class(void)
4834 {
4835 const char *apic_type = "apic";
4836
4837 /* TODO: in-kernel irqchip for hvf */
4838 if (kvm_apic_in_kernel()) {
4839 apic_type = "kvm-apic";
4840 } else if (xen_enabled()) {
4841 apic_type = "xen-apic";
4842 }
4843
4844 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4845 }
4846
4847 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
4848 {
4849 APICCommonState *apic;
4850 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
4851
4852 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
4853
4854 object_property_add_child(OBJECT(cpu), "lapic",
4855 OBJECT(cpu->apic_state), &error_abort);
4856 object_unref(OBJECT(cpu->apic_state));
4857
4858 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
4859 /* TODO: convert to link<> */
4860 apic = APIC_COMMON(cpu->apic_state);
4861 apic->cpu = cpu;
4862 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
4863 }
4864
4865 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4866 {
4867 APICCommonState *apic;
4868 static bool apic_mmio_map_once;
4869
4870 if (cpu->apic_state == NULL) {
4871 return;
4872 }
4873 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
4874 errp);
4875
4876 /* Map APIC MMIO area */
4877 apic = APIC_COMMON(cpu->apic_state);
4878 if (!apic_mmio_map_once) {
4879 memory_region_add_subregion_overlap(get_system_memory(),
4880 apic->apicbase &
4881 MSR_IA32_APICBASE_BASE,
4882 &apic->io_memory,
4883 0x1000);
4884 apic_mmio_map_once = true;
4885 }
4886 }
4887
4888 static void x86_cpu_machine_done(Notifier *n, void *unused)
4889 {
4890 X86CPU *cpu = container_of(n, X86CPU, machine_done);
4891 MemoryRegion *smram =
4892 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4893
4894 if (smram) {
4895 cpu->smram = g_new(MemoryRegion, 1);
4896 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4897 smram, 0, 1ull << 32);
4898 memory_region_set_enabled(cpu->smram, true);
4899 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
4900 }
4901 }
4902 #else
4903 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4904 {
4905 }
4906 #endif
4907
4908 /* Note: Only safe for use on x86(-64) hosts */
4909 static uint32_t x86_host_phys_bits(void)
4910 {
4911 uint32_t eax;
4912 uint32_t host_phys_bits;
4913
4914 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4915 if (eax >= 0x80000008) {
4916 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4917 /* Note: according to AMD doc 25481 rev 2.34, bits 23:16 can specify
4918 * a maximum physical address width for the guest that overrides
4919 * this value, but hardware actually setting that field has not
4920 * been observed.
4921 */
4922 host_phys_bits = eax & 0xff;
4923 } else {
4924 /* It's an odd 64-bit machine that doesn't have the leaf for
4925 * physical address bits; fall back to 36, which is what most
4926 * older Intel CPUs report.
4927 */
4928 host_phys_bits = 36;
4929 }
4930
4931 return host_phys_bits;
4932 }
4933
4934 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4935 {
4936 if (*min < value) {
4937 *min = value;
4938 }
4939 }
4940
4941 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
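/* For example, setting any bit in FEAT_8000_0008_EBX (CPUID[0x80000008].EBX)
 * raises cpuid_min_xlevel to at least 0x80000008.
 */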
4942 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4943 {
4944 CPUX86State *env = &cpu->env;
4945 FeatureWordInfo *fi = &feature_word_info[w];
4946 uint32_t eax = fi->cpuid.eax;
4947 uint32_t region = eax & 0xF0000000;
4948
4949 assert(feature_word_info[w].type == CPUID_FEATURE_WORD);
4950 if (!env->features[w]) {
4951 return;
4952 }
4953
4954 switch (region) {
4955 case 0x00000000:
4956 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4957 break;
4958 case 0x80000000:
4959 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4960 break;
4961 case 0xC0000000:
4962 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4963 break;
4964 }
4965 }
4966
4967 /* Calculate XSAVE components based on the configured CPU feature flags */
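/* For example, enabling "avx" (together with xsave) sets bit 2 (the YMM
 * state component) in FEAT_XSAVE_COMP_LO, alongside the x87 and SSE
 * components (bits 0 and 1).
 */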
4968 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4969 {
4970 CPUX86State *env = &cpu->env;
4971 int i;
4972 uint64_t mask;
4973
4974 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4975 return;
4976 }
4977
4978 mask = 0;
4979 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4980 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4981 if (env->features[esa->feature] & esa->bits) {
4982 mask |= (1ULL << i);
4983 }
4984 }
4985
4986 env->features[FEAT_XSAVE_COMP_LO] = mask;
4987 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4988 }
4989
4990 /***** Steps involved in loading and filtering CPUID data
4991 *
4992 * When initializing and realizing a CPU object, the steps
4993 * involved in setting up CPUID data are:
4994 *
4995 * 1) Loading CPU model definition (X86CPUDefinition). This is
4996 * implemented by x86_cpu_load_def() and should be completely
4997 * transparent, as it is done automatically by instance_init.
4998 * No code should need to look at X86CPUDefinition structs
4999 * outside instance_init.
5000 *
5001 * 2) CPU expansion. This is done by realize before CPUID
5002 * filtering, and will make sure host/accelerator data is
5003 * loaded for CPU models that depend on host capabilities
5004 * (e.g. "host"). Done by x86_cpu_expand_features().
5005 *
5006 * 3) CPUID filtering. This initializes extra data related to
5007 * CPUID, and checks if the host supports all capabilities
5008 * required by the CPU. Runnability of a CPU model is
5009 * determined at this step. Done by x86_cpu_filter_features().
5010 *
5011 * Some operations don't require all steps to be performed.
5012 * More precisely:
5013 *
5014 * - CPU instance creation (instance_init) will run only CPU
5015 * model loading. CPU expansion can't run at instance_init-time
5016 * because host/accelerator data may not be available yet.
5017 * - CPU realization will perform both CPU model expansion and CPUID
5018 * filtering, and return an error in case one of them fails.
5019 * - query-cpu-definitions needs to run all 3 steps. It needs
5020 * to run CPUID filtering, as the 'unavailable-features'
5021 * field is set based on the filtering results.
5022 * - The query-cpu-model-expansion QMP command only needs to run
5023 * CPU model loading and CPU expansion. It should not filter
5024 * any CPUID data based on host capabilities.
5025 */
5026
5027 /* Expand CPU configuration data, based on configured features
5028 * and host/accelerator capabilities when appropriate.
5029 */
5030 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
5031 {
5032 CPUX86State *env = &cpu->env;
5033 FeatureWord w;
5034 GList *l;
5035 Error *local_err = NULL;
5036
5037 /* TODO: now that cpu->max_features doesn't overwrite features
5038 * set using QOM properties, we can convert
5039 * plus_features & minus_features to global properties
5040 * inside x86_cpu_parse_featurestr() too.
5041 */
5042 if (cpu->max_features) {
5043 for (w = 0; w < FEATURE_WORDS; w++) {
5044 /* Override only features that weren't set explicitly
5045 * by the user.
5046 */
5047 env->features[w] |=
5048 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
5049 ~env->user_features[w] & \
5050 ~feature_word_info[w].no_autoenable_flags;
5051 }
5052 }
5053
5054 for (l = plus_features; l; l = l->next) {
5055 const char *prop = l->data;
5056 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
5057 if (local_err) {
5058 goto out;
5059 }
5060 }
5061
5062 for (l = minus_features; l; l = l->next) {
5063 const char *prop = l->data;
5064 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
5065 if (local_err) {
5066 goto out;
5067 }
5068 }
5069
5070 if (!kvm_enabled() || !cpu->expose_kvm) {
5071 env->features[FEAT_KVM] = 0;
5072 }
5073
5074 x86_cpu_enable_xsave_components(cpu);
5075
5076 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
5077 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
5078 if (cpu->full_cpuid_auto_level) {
5079 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
5080 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
5081 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
5082 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
5083 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
5084 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
5085 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
5086 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
5087 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
5088 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
5089 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
5090
5091 /* Intel Processor Trace requires CPUID[0x14] */
5092 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
5093 kvm_enabled() && cpu->intel_pt_auto_level) {
5094 x86_cpu_adjust_level(cpu, &cpu->env.cpuid_min_level, 0x14);
5095 }
5096
5097 /* SVM requires CPUID[0x8000000A] */
5098 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
5099 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
5100 }
5101
5102 /* SEV requires CPUID[0x8000001F] */
5103 if (sev_enabled()) {
5104 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
5105 }
5106 }
5107
5108 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
5109 if (env->cpuid_level == UINT32_MAX) {
5110 env->cpuid_level = env->cpuid_min_level;
5111 }
5112 if (env->cpuid_xlevel == UINT32_MAX) {
5113 env->cpuid_xlevel = env->cpuid_min_xlevel;
5114 }
5115 if (env->cpuid_xlevel2 == UINT32_MAX) {
5116 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
5117 }
5118
5119 out:
5120 if (local_err != NULL) {
5121 error_propagate(errp, local_err);
5122 }
5123 }
5124
5125 /*
5126  * Finishes initialization of CPUID data and filters CPU feature
5127  * words based on host availability of each feature.
5128 *
5129 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
5130 */
5131 static int x86_cpu_filter_features(X86CPU *cpu)
5132 {
5133 CPUX86State *env = &cpu->env;
5134 FeatureWord w;
5135 int rv = 0;
5136
5137 for (w = 0; w < FEATURE_WORDS; w++) {
5138 uint32_t host_feat =
5139 x86_cpu_get_supported_feature_word(w, false);
5140 uint32_t requested_features = env->features[w];
5141 env->features[w] &= host_feat;
5142 cpu->filtered_features[w] = requested_features & ~env->features[w];
5143 if (cpu->filtered_features[w]) {
5144 rv = 1;
5145 }
5146 }
5147
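    /* Intel Processor Trace: CPUID leaf 0x14 (subleaves 0 and 1) describes
     * the PT capabilities.  Check that the host exposes at least the fixed
     * capability set that cpu_x86_cpuid() reports to the guest.
     */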
5148 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
5149 kvm_enabled()) {
5150 KVMState *s = CPU(cpu)->kvm_state;
5151 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
5152 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
5153 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
5154 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
5155 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
5156
5157 if (!eax_0 ||
5158 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
5159 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
5160 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
5161 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
5162 INTEL_PT_ADDR_RANGES_NUM) ||
5163 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
5164 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
5165 (ecx_0 & INTEL_PT_IP_LIP)) {
5166 /*
5167 * Processor Trace capabilities aren't configurable, so if the
5168 * host can't emulate the capabilities we report on
5169 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
5170 */
5171 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
5172 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
5173 rv = 1;
5174 }
5175 }
5176
5177 return rv;
5178 }
5179
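/*
 * realize method for TYPE_X86_CPU: expands the configured features, filters
 * them against host/accelerator capabilities, sets up physical-address-bit
 * and cache information, creates the APIC and (for TCG) the SMM address
 * space, then initializes the vCPU and chains to the parent realize.
 */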
5180 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
5181 {
5182 CPUState *cs = CPU(dev);
5183 X86CPU *cpu = X86_CPU(dev);
5184 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5185 CPUX86State *env = &cpu->env;
5186 Error *local_err = NULL;
5187 static bool ht_warned;
5188
5189 if (xcc->host_cpuid_required) {
5190 if (!accel_uses_host_cpuid()) {
5191 char *name = x86_cpu_class_get_model_name(xcc);
5192 error_setg(&local_err, "CPU model '%s' requires KVM", name);
5193 g_free(name);
5194 goto out;
5195 }
5196
5197 if (enable_cpu_pm) {
5198 host_cpuid(5, 0, &cpu->mwait.eax, &cpu->mwait.ebx,
5199 &cpu->mwait.ecx, &cpu->mwait.edx);
5200 env->features[FEAT_1_ECX] |= CPUID_EXT_MONITOR;
5201 }
5202 }
5203
5204 /* mwait extended info: needed for Core compatibility */
5205     /* We always wake on interrupt even if the host does not have the capability */
5206 cpu->mwait.ecx |= CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
5207
5208 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
5209 error_setg(errp, "apic-id property was not initialized properly");
5210 return;
5211 }
5212
5213 x86_cpu_expand_features(cpu, &local_err);
5214 if (local_err) {
5215 goto out;
5216 }
5217
5218 if (x86_cpu_filter_features(cpu) &&
5219 (cpu->check_cpuid || cpu->enforce_cpuid)) {
5220 x86_cpu_report_filtered_features(cpu);
5221 if (cpu->enforce_cpuid) {
5222 error_setg(&local_err,
5223 accel_uses_host_cpuid() ?
5224 "Host doesn't support requested features" :
5225 "TCG doesn't support requested features");
5226 goto out;
5227 }
5228 }
5229
5230 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
5231 * CPUID[1].EDX.
5232 */
5233 if (IS_AMD_CPU(env)) {
5234 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
5235 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
5236 & CPUID_EXT2_AMD_ALIASES);
5237 }
5238
5239     /* For 64-bit systems, decide how many physical address bits to present.
5240      * Ideally this should match the host; anything other than the host value
5241      * can cause incorrect guest behaviour.
5242      * QEMU used to pick the magic value of 40 bits, which corresponds to
5243      * consumer AMD devices but nothing else.
5244      */
5245 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
5246 if (accel_uses_host_cpuid()) {
5247 uint32_t host_phys_bits = x86_host_phys_bits();
5248 static bool warned;
5249
5250 /* Print a warning if the user set it to a value that's not the
5251 * host value.
5252 */
5253 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
5254 !warned) {
5255 warn_report("Host physical bits (%u)"
5256                             " do not match phys-bits property (%u)",
5257 host_phys_bits, cpu->phys_bits);
5258 warned = true;
5259 }
5260
5261 if (cpu->host_phys_bits) {
5262 /* The user asked for us to use the host physical bits */
5263 cpu->phys_bits = host_phys_bits;
5264 if (cpu->host_phys_bits_limit &&
5265 cpu->phys_bits > cpu->host_phys_bits_limit) {
5266 cpu->phys_bits = cpu->host_phys_bits_limit;
5267 }
5268 }
5269
5270 if (cpu->phys_bits &&
5271 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
5272 cpu->phys_bits < 32)) {
5273 error_setg(errp, "phys-bits should be between 32 and %u "
5274                            "(but is %u)",
5275 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
5276 return;
5277 }
5278 } else {
5279 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
5280 error_setg(errp, "TCG only supports phys-bits=%u",
5281 TCG_PHYS_ADDR_BITS);
5282 return;
5283 }
5284 }
5285 /* 0 means it was not explicitly set by the user (or by machine
5286 * compat_props or by the host code above). In this case, the default
5287 * is the value used by TCG (40).
5288 */
5289 if (cpu->phys_bits == 0) {
5290 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
5291 }
5292 } else {
5293         /* For 32-bit systems, don't use the user-set value, but keep
5294 * phys_bits consistent with what we tell the guest.
5295 */
5296 if (cpu->phys_bits != 0) {
5297             error_setg(errp, "phys-bits is not user-configurable for 32-bit CPUs");
5298 return;
5299 }
5300
5301 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
5302 cpu->phys_bits = 36;
5303 } else {
5304 cpu->phys_bits = 32;
5305 }
5306 }
5307
5308 /* Cache information initialization */
5309 if (!cpu->legacy_cache) {
5310 if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
5311 char *name = x86_cpu_class_get_model_name(xcc);
5312 error_setg(errp,
5313 "CPU model '%s' doesn't support legacy-cache=off", name);
5314 g_free(name);
5315 return;
5316 }
5317 env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
5318 *xcc->cpu_def->cache_info;
5319 } else {
5320 /* Build legacy cache information */
5321 env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
5322 env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
5323 env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
5324 env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
5325
5326 env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
5327 env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
5328 env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
5329 env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
5330
5331 env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
5332 env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
5333 env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
5334 env->cache_info_amd.l3_cache = &legacy_l3_cache;
5335 }
5336
5337
5338 cpu_exec_realizefn(cs, &local_err);
5339 if (local_err != NULL) {
5340 error_propagate(errp, local_err);
5341 return;
5342 }
5343
5344 #ifndef CONFIG_USER_ONLY
5345 MachineState *ms = MACHINE(qdev_get_machine());
5346 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
5347
5348 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || ms->smp.cpus > 1) {
5349 x86_cpu_apic_create(cpu, &local_err);
5350 if (local_err != NULL) {
5351 goto out;
5352 }
5353 }
5354 #endif
5355
5356 mce_init(cpu);
5357
5358 #ifndef CONFIG_USER_ONLY
5359 if (tcg_enabled()) {
5360 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
5361 cpu->cpu_as_root = g_new(MemoryRegion, 1);
5362
5363 /* Outer container... */
5364 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
5365 memory_region_set_enabled(cpu->cpu_as_root, true);
5366
5367 /* ... with two regions inside: normal system memory with low
5368 * priority, and...
5369 */
5370 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
5371 get_system_memory(), 0, ~0ull);
5372 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
5373 memory_region_set_enabled(cpu->cpu_as_mem, true);
5374
5375 cs->num_ases = 2;
5376 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
5377 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
5378
5379 /* ... SMRAM with higher priority, linked from /machine/smram. */
5380 cpu->machine_done.notify = x86_cpu_machine_done;
5381 qemu_add_machine_init_done_notifier(&cpu->machine_done);
5382 }
5383 #endif
5384
5385 qemu_init_vcpu(cs);
5386
5387 /*
5388      * Most Intel and certain AMD CPUs support hyperthreading. Even though QEMU
5389      * adjusts CPUID_0000_0001_EBX and CPUID_8000_0008_ECX to match the
5390      * configured topology (sockets, cores, threads), it is still better to
5391      * warn users when the CPU model doesn't support hyperthreading.
5392      *
5393      * NOTE: the following code has to run after qemu_init_vcpu(). Otherwise
5394      * cs->nr_threads hasn't been populated yet and the check is incorrect.
5395 */
5396 if (IS_AMD_CPU(env) &&
5397 !(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
5398 cs->nr_threads > 1 && !ht_warned) {
5399 warn_report("This family of AMD CPU doesn't support "
5400                     "hyperthreading (%d)",
5401 cs->nr_threads);
5402 error_printf("Please configure -smp options properly"
5403                      " or try enabling the topoext feature.\n");
5404 ht_warned = true;
5405 }
5406
5407 x86_cpu_apic_realize(cpu, &local_err);
5408 if (local_err != NULL) {
5409 goto out;
5410 }
5411 cpu_reset(cs);
5412
5413 xcc->parent_realize(dev, &local_err);
5414
5415 out:
5416 if (local_err != NULL) {
5417 error_propagate(errp, local_err);
5418 return;
5419 }
5420 }
5421
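/*
 * unrealize method for TYPE_X86_CPU: stops the vCPU, unregisters the reset
 * handler and releases the APIC before chaining to the parent unrealize.
 */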
5422 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
5423 {
5424 X86CPU *cpu = X86_CPU(dev);
5425 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
5426 Error *local_err = NULL;
5427
5428 #ifndef CONFIG_USER_ONLY
5429 cpu_remove_sync(CPU(dev));
5430 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
5431 #endif
5432
5433 if (cpu->apic_state) {
5434 object_unparent(OBJECT(cpu->apic_state));
5435 cpu->apic_state = NULL;
5436 }
5437
5438 xcc->parent_unrealize(dev, &local_err);
5439 if (local_err != NULL) {
5440 error_propagate(errp, local_err);
5441 return;
5442 }
5443 }
5444
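/*
 * Opaque data attached to the per-feature-bit QOM properties registered
 * below: @w selects the feature word, @mask the bit(s) within it.
 */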
5445 typedef struct BitProperty {
5446 FeatureWord w;
5447 uint32_t mask;
5448 } BitProperty;
5449
5450 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
5451 void *opaque, Error **errp)
5452 {
5453 X86CPU *cpu = X86_CPU(obj);
5454 BitProperty *fp = opaque;
5455 uint32_t f = cpu->env.features[fp->w];
5456 bool value = (f & fp->mask) == fp->mask;
5457 visit_type_bool(v, name, &value, errp);
5458 }
5459
5460 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
5461 void *opaque, Error **errp)
5462 {
5463 DeviceState *dev = DEVICE(obj);
5464 X86CPU *cpu = X86_CPU(obj);
5465 BitProperty *fp = opaque;
5466 Error *local_err = NULL;
5467 bool value;
5468
5469 if (dev->realized) {
5470 qdev_prop_set_after_realize(dev, name, errp);
5471 return;
5472 }
5473
5474 visit_type_bool(v, name, &value, &local_err);
5475 if (local_err) {
5476 error_propagate(errp, local_err);
5477 return;
5478 }
5479
5480 if (value) {
5481 cpu->env.features[fp->w] |= fp->mask;
5482 } else {
5483 cpu->env.features[fp->w] &= ~fp->mask;
5484 }
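    /* Remember that this bit was set explicitly by the user, so that
     * x86_cpu_expand_features() won't auto-override it via max_features.
     */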
5485 cpu->env.user_features[fp->w] |= fp->mask;
5486 }
5487
5488 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
5489 void *opaque)
5490 {
5491 BitProperty *prop = opaque;
5492 g_free(prop);
5493 }
5494
5495 /* Register a boolean property to get/set a single bit in a uint32_t field.
5496 *
5497 * The same property name can be registered multiple times to make it affect
5498 * multiple bits in the same FeatureWord. In that case, the getter will return
5499 * true only if all bits are set.
5500 */
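/* For example (illustrative): registering "sse4.2" against FEAT_1_ECX
 * bit 20 creates a boolean "sse4.2" property that toggles that CPUID bit.
 */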
5501 static void x86_cpu_register_bit_prop(X86CPU *cpu,
5502 const char *prop_name,
5503 FeatureWord w,
5504 int bitnr)
5505 {
5506 BitProperty *fp;
5507 ObjectProperty *op;
5508 uint32_t mask = (1UL << bitnr);
5509
5510 op = object_property_find(OBJECT(cpu), prop_name, NULL);
5511 if (op) {
5512 fp = op->opaque;
5513 assert(fp->w == w);
5514 fp->mask |= mask;
5515 } else {
5516 fp = g_new0(BitProperty, 1);
5517 fp->w = w;
5518 fp->mask = mask;
5519 object_property_add(OBJECT(cpu), prop_name, "bool",
5520 x86_cpu_get_bit_prop,
5521 x86_cpu_set_bit_prop,
5522 x86_cpu_release_bit_prop, fp, &error_abort);
5523 }
5524 }
5525
5526 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
5527 FeatureWord w,
5528 int bitnr)
5529 {
5530 FeatureWordInfo *fi = &feature_word_info[w];
5531 const char *name = fi->feat_names[bitnr];
5532
5533 if (!name) {
5534 return;
5535 }
5536
5537 /* Property names should use "-" instead of "_".
5538 * Old names containing underscores are registered as aliases
5539 * using object_property_add_alias()
5540 */
5541 assert(!strchr(name, '_'));
5542 /* aliases don't use "|" delimiters anymore, they are registered
5543 * manually using object_property_add_alias() */
5544 assert(!strchr(name, '|'));
5545 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
5546 }
5547
5548 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
5549 {
5550 X86CPU *cpu = X86_CPU(cs);
5551 CPUX86State *env = &cpu->env;
5552 GuestPanicInformation *panic_info = NULL;
5553
5554 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
5555 panic_info = g_malloc0(sizeof(GuestPanicInformation));
5556
5557 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
5558
5559 assert(HV_CRASH_PARAMS >= 5);
5560 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
5561 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
5562 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
5563 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
5564 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
5565 }
5566
5567 return panic_info;
5568 }
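
/*
 * QOM getter for the "crash-information" property; it only returns data
 * after a guest crash has been recorded for this CPU.
 */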
5569 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
5570 const char *name, void *opaque,
5571 Error **errp)
5572 {
5573 CPUState *cs = CPU(obj);
5574 GuestPanicInformation *panic_info;
5575
5576 if (!cs->crash_occurred) {
5577         error_setg(errp, "No crash occurred");
5578 return;
5579 }
5580
5581 panic_info = x86_cpu_get_crash_info(cs);
5582 if (panic_info == NULL) {
5583 error_setg(errp, "No crash information");
5584 return;
5585 }
5586
5587 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
5588 errp);
5589 qapi_free_GuestPanicInformation(panic_info);
5590 }
5591
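/*
 * instance_init for TYPE_X86_CPU: registers the version/vendor/feature-word
 * QOM properties, the per-feature-bit boolean properties and their legacy
 * aliases, then loads the class's CPU model definition (if any).
 */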
5592 static void x86_cpu_initfn(Object *obj)
5593 {
5594 X86CPU *cpu = X86_CPU(obj);
5595 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
5596 CPUX86State *env = &cpu->env;
5597 FeatureWord w;
5598
5599 env->nr_dies = 1;
5600 cpu_set_cpustate_pointers(cpu);
5601
5602 object_property_add(obj, "family", "int",
5603 x86_cpuid_version_get_family,
5604 x86_cpuid_version_set_family, NULL, NULL, NULL);
5605 object_property_add(obj, "model", "int",
5606 x86_cpuid_version_get_model,
5607 x86_cpuid_version_set_model, NULL, NULL, NULL);
5608 object_property_add(obj, "stepping", "int",
5609 x86_cpuid_version_get_stepping,
5610 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
5611 object_property_add_str(obj, "vendor",
5612 x86_cpuid_get_vendor,
5613 x86_cpuid_set_vendor, NULL);
5614 object_property_add_str(obj, "model-id",
5615 x86_cpuid_get_model_id,
5616 x86_cpuid_set_model_id, NULL);
5617 object_property_add(obj, "tsc-frequency", "int",
5618 x86_cpuid_get_tsc_freq,
5619 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
5620 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
5621 x86_cpu_get_feature_words,
5622 NULL, NULL, (void *)env->features, NULL);
5623 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
5624 x86_cpu_get_feature_words,
5625 NULL, NULL, (void *)cpu->filtered_features, NULL);
5626 /*
5627 * The "unavailable-features" property has the same semantics as
5628 * CpuDefinitionInfo.unavailable-features on the "query-cpu-definitions"
5629 * QMP command: they list the features that would have prevented the
5630 * CPU from running if the "enforce" flag was set.
5631 */
5632 object_property_add(obj, "unavailable-features", "strList",
5633 x86_cpu_get_unavailable_features,
5634 NULL, NULL, NULL, &error_abort);
5635
5636 object_property_add(obj, "crash-information", "GuestPanicInformation",
5637 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
5638
5639 for (w = 0; w < FEATURE_WORDS; w++) {
5640 int bitnr;
5641
5642 for (bitnr = 0; bitnr < 32; bitnr++) {
5643 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
5644 }
5645 }
5646
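    /* Compatibility aliases: legacy spellings (underscores, old names) for
     * the canonical dash-separated feature properties registered above.
     */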
5647 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
5648 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
5649 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
5650 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
5651 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
5652 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
5653 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
5654
5655 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
5656 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
5657 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
5658 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
5659 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
5660 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
5661 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
5662 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
5663 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
5664 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
5665 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
5666 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
5667 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
5668 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
5669 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
5670 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
5671 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
5672 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
5673 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
5674 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
5675 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
5676
5677 if (xcc->cpu_def) {
5678 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
5679 }
5680 }
5681
5682 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5683 {
5684 X86CPU *cpu = X86_CPU(cs);
5685
5686 return cpu->apic_id;
5687 }
5688
5689 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5690 {
5691 X86CPU *cpu = X86_CPU(cs);
5692
5693 return cpu->env.cr[0] & CR0_PG_MASK;
5694 }
5695
5696 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5697 {
5698 X86CPU *cpu = X86_CPU(cs);
5699
5700 cpu->env.eip = value;
5701 }
5702
5703 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5704 {
5705 X86CPU *cpu = X86_CPU(cs);
5706
5707 cpu->env.eip = tb->pc - tb->cs_base;
5708 }
5709
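/*
 * Return the highest-priority interrupt type from @interrupt_request that
 * the CPU can accept in its current state (GIF, SMM/NMI masking, EFLAGS.IF,
 * the interrupt-inhibit shadow and virtual-interrupt state), or 0 if none
 * can be delivered now.
 */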
5710 int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
5711 {
5712 X86CPU *cpu = X86_CPU(cs);
5713 CPUX86State *env = &cpu->env;
5714
5715 #if !defined(CONFIG_USER_ONLY)
5716 if (interrupt_request & CPU_INTERRUPT_POLL) {
5717 return CPU_INTERRUPT_POLL;
5718 }
5719 #endif
5720 if (interrupt_request & CPU_INTERRUPT_SIPI) {
5721 return CPU_INTERRUPT_SIPI;
5722 }
5723
5724 if (env->hflags2 & HF2_GIF_MASK) {
5725 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
5726 !(env->hflags & HF_SMM_MASK)) {
5727 return CPU_INTERRUPT_SMI;
5728 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
5729 !(env->hflags2 & HF2_NMI_MASK)) {
5730 return CPU_INTERRUPT_NMI;
5731 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
5732 return CPU_INTERRUPT_MCE;
5733 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
5734 (((env->hflags2 & HF2_VINTR_MASK) &&
5735 (env->hflags2 & HF2_HIF_MASK)) ||
5736 (!(env->hflags2 & HF2_VINTR_MASK) &&
5737 (env->eflags & IF_MASK &&
5738 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
5739 return CPU_INTERRUPT_HARD;
5740 #if !defined(CONFIG_USER_ONLY)
5741 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
5742 (env->eflags & IF_MASK) &&
5743 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
5744 return CPU_INTERRUPT_VIRQ;
5745 #endif
5746 }
5747 }
5748
5749 return 0;
5750 }
5751
5752 static bool x86_cpu_has_work(CPUState *cs)
5753 {
5754 return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
5755 }
5756
5757 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5758 {
5759 X86CPU *cpu = X86_CPU(cs);
5760 CPUX86State *env = &cpu->env;
5761
5762 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5763 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5764 : bfd_mach_i386_i8086);
5765 info->print_insn = print_insn_i386;
5766
5767 info->cap_arch = CS_ARCH_X86;
5768 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5769 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5770 : CS_MODE_16);
5771 info->cap_insn_unit = 1;
5772 info->cap_insn_split = 8;
5773 }
5774
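/*
 * Recompute the env->hflags bits that are derived from other CPU state
 * (CPL, CS/SS size, LMA, ADDSEG, ...) from the current segment descriptors,
 * control registers, EFLAGS and EFER; the remaining flags, selected by
 * HFLAG_COPY_MASK, are preserved as-is.
 */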
5775 void x86_update_hflags(CPUX86State *env)
5776 {
5777 uint32_t hflags;
5778 #define HFLAG_COPY_MASK \
5779 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
5780 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
5781 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
5782 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
5783
5784 hflags = env->hflags & HFLAG_COPY_MASK;
5785 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
5786 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
5787 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
5788 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
5789 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
5790
5791 if (env->cr[4] & CR4_OSFXSR_MASK) {
5792 hflags |= HF_OSFXSR_MASK;
5793 }
5794
5795 if (env->efer & MSR_EFER_LMA) {
5796 hflags |= HF_LMA_MASK;
5797 }
5798
5799 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
5800 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
5801 } else {
5802 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
5803 (DESC_B_SHIFT - HF_CS32_SHIFT);
5804 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
5805 (DESC_B_SHIFT - HF_SS32_SHIFT);
5806 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
5807 !(hflags & HF_CS32_MASK)) {
5808 hflags |= HF_ADDSEG_MASK;
5809 } else {
5810 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
5811 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
5812 }
5813 }
5814 env->hflags = hflags;
5815 }
5816
5817 static Property x86_cpu_properties[] = {
5818 #ifdef CONFIG_USER_ONLY
5819 /* apic_id = 0 by default for *-user, see commit 9886e834 */
5820 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
5821 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
5822 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
5823 DEFINE_PROP_INT32("die-id", X86CPU, die_id, 0),
5824 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
5825 #else
5826 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
5827 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
5828 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
5829 DEFINE_PROP_INT32("die-id", X86CPU, die_id, -1),
5830 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
5831 #endif
5832 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
5833 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
5834
5835 DEFINE_PROP_UINT32("hv-spinlocks", X86CPU, hyperv_spinlock_attempts,
5836 HYPERV_SPINLOCK_NEVER_RETRY),
5837 DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
5838 HYPERV_FEAT_RELAXED, 0),
5839 DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
5840 HYPERV_FEAT_VAPIC, 0),
5841 DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
5842 HYPERV_FEAT_TIME, 0),
5843 DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
5844 HYPERV_FEAT_CRASH, 0),
5845 DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
5846 HYPERV_FEAT_RESET, 0),
5847 DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
5848 HYPERV_FEAT_VPINDEX, 0),
5849 DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
5850 HYPERV_FEAT_RUNTIME, 0),
5851 DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
5852 HYPERV_FEAT_SYNIC, 0),
5853 DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
5854 HYPERV_FEAT_STIMER, 0),
5855 DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
5856 HYPERV_FEAT_FREQUENCIES, 0),
5857 DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
5858 HYPERV_FEAT_REENLIGHTENMENT, 0),
5859 DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
5860 HYPERV_FEAT_TLBFLUSH, 0),
5861 DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
5862 HYPERV_FEAT_EVMCS, 0),
5863 DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
5864 HYPERV_FEAT_IPI, 0),
5865 DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
5866 HYPERV_FEAT_STIMER_DIRECT, 0),
5867 DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
5868
5869 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
5870 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
5871 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
5872 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
5873 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
5874 DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0),
5875 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
5876 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
5877 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
5878 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
5879 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
5880 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
5881 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
5882 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
5883 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
5884 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
5885 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
5886 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
5887 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
5888 false),
5889 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
5890 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
5891 DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count,
5892 true),
5893 /*
5894      * legacy_cache defaults to true unless the CPU model provides its
5895 * own cache information (see x86_cpu_load_def()).
5896 */
5897 DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
5898
5899 /*
5900 * From "Requirements for Implementing the Microsoft
5901 * Hypervisor Interface":
5902 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
5903 *
5904 * "Starting with Windows Server 2012 and Windows 8, if
5905 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
5906 * the hypervisor imposes no specific limit to the number of VPs.
5907 * In this case, Windows Server 2012 guest VMs may use more than
5908 * 64 VPs, up to the maximum supported number of processors applicable
5909 * to the specific Windows version being used."
5910 */
5911 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
5912 DEFINE_PROP_BOOL("x-hv-synic-kvm-only", X86CPU, hyperv_synic_kvm_only,
5913 false),
5914 DEFINE_PROP_BOOL("x-intel-pt-auto-level", X86CPU, intel_pt_auto_level,
5915 true),
5916 DEFINE_PROP_END_OF_LIST()
5917 };
5918
5919 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
5920 {
5921 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5922 CPUClass *cc = CPU_CLASS(oc);
5923 DeviceClass *dc = DEVICE_CLASS(oc);
5924
5925 device_class_set_parent_realize(dc, x86_cpu_realizefn,
5926 &xcc->parent_realize);
5927 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
5928 &xcc->parent_unrealize);
5929 dc->props = x86_cpu_properties;
5930
5931 xcc->parent_reset = cc->reset;
5932 cc->reset = x86_cpu_reset;
5933 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
5934
5935 cc->class_by_name = x86_cpu_class_by_name;
5936 cc->parse_features = x86_cpu_parse_featurestr;
5937 cc->has_work = x86_cpu_has_work;
5938 #ifdef CONFIG_TCG
5939 cc->do_interrupt = x86_cpu_do_interrupt;
5940 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
5941 #endif
5942 cc->dump_state = x86_cpu_dump_state;
5943 cc->get_crash_info = x86_cpu_get_crash_info;
5944 cc->set_pc = x86_cpu_set_pc;
5945 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
5946 cc->gdb_read_register = x86_cpu_gdb_read_register;
5947 cc->gdb_write_register = x86_cpu_gdb_write_register;
5948 cc->get_arch_id = x86_cpu_get_arch_id;
5949 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
5950 #ifndef CONFIG_USER_ONLY
5951 cc->asidx_from_attrs = x86_asidx_from_attrs;
5952 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
5953 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
5954 cc->write_elf64_note = x86_cpu_write_elf64_note;
5955 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
5956 cc->write_elf32_note = x86_cpu_write_elf32_note;
5957 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
5958 cc->vmsd = &vmstate_x86_cpu;
5959 #endif
5960 cc->gdb_arch_name = x86_gdb_arch_name;
5961 #ifdef TARGET_X86_64
5962 cc->gdb_core_xml_file = "i386-64bit.xml";
5963 cc->gdb_num_core_regs = 66;
5964 #else
5965 cc->gdb_core_xml_file = "i386-32bit.xml";
5966 cc->gdb_num_core_regs = 50;
5967 #endif
5968 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
5969 cc->debug_excp_handler = breakpoint_handler;
5970 #endif
5971 cc->cpu_exec_enter = x86_cpu_exec_enter;
5972 cc->cpu_exec_exit = x86_cpu_exec_exit;
5973 #ifdef CONFIG_TCG
5974 cc->tcg_initialize = tcg_x86_init;
5975 cc->tlb_fill = x86_cpu_tlb_fill;
5976 #endif
5977 cc->disas_set_info = x86_disas_set_info;
5978
5979 dc->user_creatable = true;
5980 }
5981
5982 static const TypeInfo x86_cpu_type_info = {
5983 .name = TYPE_X86_CPU,
5984 .parent = TYPE_CPU,
5985 .instance_size = sizeof(X86CPU),
5986 .instance_init = x86_cpu_initfn,
5987 .abstract = true,
5988 .class_size = sizeof(X86CPUClass),
5989 .class_init = x86_cpu_common_class_init,
5990 };
5991
5992
5993 /* "base" CPU model, used by query-cpu-model-expansion */
5994 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
5995 {
5996 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5997
5998 xcc->static_model = true;
5999 xcc->migration_safe = true;
6000 xcc->model_description = "base CPU model type with no features enabled";
6001 xcc->ordering = 8;
6002 }
6003
6004 static const TypeInfo x86_base_cpu_type_info = {
6005 .name = X86_CPU_TYPE_NAME("base"),
6006 .parent = TYPE_X86_CPU,
6007 .class_init = x86_cpu_base_class_init,
6008 };
6009
6010 static void x86_cpu_register_types(void)
6011 {
6012 int i;
6013
6014 type_register_static(&x86_cpu_type_info);
6015 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
6016 x86_register_cpudef_type(&builtin_x86_defs[i]);
6017 }
6018 type_register_static(&max_x86_cpu_type_info);
6019 type_register_static(&x86_base_cpu_type_info);
6020 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
6021 type_register_static(&host_x86_cpu_type_info);
6022 #endif
6023 }
6024
6025 type_init(x86_cpu_register_types)