/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "kvm.h"

//#define DEBUG_MMU

/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification".  In cases of disagreement
 * about feature names, the Linux name is used. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
static const char *ext_feature_name[] = {
    "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor", "ds_cpl", "vmx",
    NULL /* Linux smx */, "est",
    "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
    NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, "hypervisor",
};
static const char *ext2_feature_name[] = {
    "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
    "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall", "mtrr", "pge",
    "mca", "cmov",
    "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */, NULL,
    "mmxext", "mmx",
    "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */,
    "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
};
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm",
    "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */,
    "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL, "skinit", "wdt",
    NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};

static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    int found = 0;

    for ( i = 0 ; i < 32 ; i++ )
        if (feature_name[i] && !strcmp (flagname, feature_name[i])) {
            *features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext_feature_name[i] && !strcmp (flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext2_feature_name[i] && !strcmp (flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            found = 1;
        }
    for ( i = 0 ; i < 32 ; i++ )
        if (ext3_feature_name[i] && !strcmp (flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            found = 1;
        }
    if (!found) {
        fprintf(stderr, "CPU feature %s not found\n", flagname);
    }
}
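
/*
 * Usage sketch (illustrative, not part of the original file): how the
 * helper above is typically driven.  Note the tables use the Linux flag
 * names, so SSE3 must be requested as "pni"; an unknown name only
 * prints a warning.  The function below is hypothetical and kept
 * compiled out.
 */
#if 0
static void example_flag_usage(void)
{
    uint32_t plus = 0, plus_ext = 0, plus_ext2 = 0, plus_ext3 = 0;

    /* "pni" is bit 0 of ext_feature_name[], so plus_ext becomes 0x1 */
    add_flagname_to_bitmaps("pni", &plus, &plus_ext, &plus_ext2, &plus_ext3);
    /* "nx" is bit 20 of ext2_feature_name[], so plus_ext2 becomes 1 << 20 */
    add_flagname_to_bitmaps("nx", &plus, &plus_ext, &plus_ext2, &plus_ext3);
}
#endif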

typedef struct x86_def_t {
    const char *name;
    uint32_t level;
    uint32_t vendor1, vendor2, vendor3;
    int family;
    int model;
    int stepping;
    uint32_t features, ext_features, ext2_features, ext3_features;
    uint32_t xlevel;
    char model_id[48];
    int vendor_override;
} x86_def_t;

#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000000A,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "phenom",
        .level = 5,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 16,
        .model = 2,
        .stepping = 3,
        /* Missing: CPUID_VME, CPUID_HT */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* Missing: CPUID_EXT_CX16, CPUID_EXT_POPCNT */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_FFXSR,
        /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
                    CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
                    CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
                    CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x8000001A,
        .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
    },
    {
        .name = "core2duo",
        .level = 10,
        .family = 6,
        .model = 15,
        .stepping = 11,
        /* The original CPU also implements these features:
               CPUID_VME, CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
            CPUID_PSE36,
        /* The original CPU also implements these ext features:
               CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
               CPUID_EXT_TM2, CPUID_EXT_CX16, CPUID_EXT_XTPR, CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3,
        .ext2_features = CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x80000008,
        .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
    },
#endif
    {
        .name = "qemu32",
        .level = 2,
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "coreduo",
        .level = 10,
        .family = 6,
        .model = 14,
        .stepping = 8,
        /* The original CPU also implements these features:
               CPUID_DTS, CPUID_ACPI, CPUID_SS, CPUID_HT,
               CPUID_TM, CPUID_PBE */
        .features = PPRO_FEATURES | CPUID_VME |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA,
        /* The original CPU also implements these ext features:
               CPUID_EXT_VMX, CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
               CPUID_EXT_PDCM */
        .ext_features = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
        .ext2_features = CPUID_EXT2_NX,
        .xlevel = 0x80000008,
        .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
    },
    {
        .name = "486",
        .level = 0,
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = I486_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .level = 1,
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = PENTIUM_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .level = 2,
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = PENTIUM2_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .level = 2,
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = PENTIUM3_FEATURES,
        .xlevel = 0,
    },
    {
        .name = "athlon",
        .level = 2,
        .vendor1 = CPUID_VENDOR_AMD_1,
        .vendor2 = CPUID_VENDOR_AMD_2,
        .vendor3 = CPUID_VENDOR_AMD_3,
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
            CPUID_MCA,
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_MMXEXT |
            CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
        .xlevel = 0x80000008,
        /* XXX: put another string ? */
        .model_id = "QEMU Virtual CPU version " QEMU_VERSION,
    },
    {
        .name = "n270",
        /* original is on level 10 */
        .level = 5,
        .family = 6,
        .model = 28,
        .stepping = 2,
        .features = PPRO_FEATURES |
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME,
        /* Missing: CPUID_DTS | CPUID_ACPI | CPUID_SS |
         * CPUID_HT | CPUID_TM | CPUID_PBE */
        /* Some CPUs got no CPUID_SEP */
        .ext_features = CPUID_EXT_MONITOR |
            CPUID_EXT_SSE3 /* PNI */ | CPUID_EXT_SSSE3,
        /* Missing: CPUID_EXT_DSCPL | CPUID_EXT_EST |
         * CPUID_EXT_TM2 | CPUID_EXT_XTPR */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) | CPUID_EXT2_NX,
        /* Missing: .ext3_features = CPUID_EXT3_LAHF_LM */
        .xlevel = 0x8000000A,
        .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
    },
};

static void host_cpuid(uint32_t function, uint32_t count, uint32_t *eax,
                       uint32_t *ebx, uint32_t *ecx, uint32_t *edx);

static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax, ebx, ecx, edx;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 +  0, &eax, 4);
        memcpy(str + i * 16 +  4, &ebx, 4);
        memcpy(str + i * 16 +  8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
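
/*
 * Worked example (illustrative, not part of the original file): CPUID
 * leaves 0x80000002..0x80000004 each return 16 bytes of the processor
 * brand string in EAX/EBX/ECX/EDX, so the three iterations above fill
 * str[0..47].  On a hypothetical host whose brand string begins with
 * "QEMU", the first leaf would return EAX = 0x554d4551, i.e. the bytes
 * 'Q','E','M','U' in little-endian order.
 */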

static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    x86_cpu_def->name = "host";
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->level = eax;
    x86_cpu_def->vendor1 = ebx;
    x86_cpu_def->vendor2 = edx;
    x86_cpu_def->vendor3 = ecx;

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    x86_cpu_def->stepping = eax & 0x0F;
    x86_cpu_def->ext_features = ecx;
    x86_cpu_def->features = edx;

    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->xlevel = eax;

    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_def->ext2_features = edx;
    x86_cpu_def->ext3_features = ecx;
    cpu_x86_fill_model_id(x86_cpu_def->model_id);
    x86_cpu_def->vendor_override = 0;

    return 0;
}
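
/*
 * Worked example (illustrative): for a hypothetical CPUID.1 EAX of
 * 0x000106a5 the decoding above gives
 *   stepping = 0x000106a5 & 0x0f = 5,
 *   model    = ((eax >> 4) & 0x0f) | ((eax & 0xf0000) >> 12)
 *            = 0x0a | 0x10 = 0x1a,
 *   family   = ((eax >> 8) & 0x0f) + ((eax >> 20) & 0xff) = 6 + 0 = 6.
 */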

static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
{
    unsigned int i;
    x86_def_t *def;

    char *s = strdup(cpu_model);
    char *featurestr, *name = strtok(s, ",");
    uint32_t plus_features = 0, plus_ext_features = 0;
    uint32_t plus_ext2_features = 0, plus_ext3_features = 0;
    uint32_t minus_features = 0, minus_ext_features = 0;
    uint32_t minus_ext2_features = 0, minus_ext3_features = 0;
    int family = -1, model = -1, stepping = -1;

    def = NULL;
    for (i = 0; i < ARRAY_SIZE(x86_defs); i++) {
        if (strcmp(name, x86_defs[i].name) == 0) {
            def = &x86_defs[i];
            break;
        }
    }
    if (!def) {
        if (strcmp(name, "host") != 0) {
            goto error;
        }
        cpu_x86_fill_host(x86_cpu_def);
    } else {
        memcpy(x86_cpu_def, def, sizeof(*def));
    }

    add_flagname_to_bitmaps("hypervisor", &plus_features,
                            &plus_ext_features, &plus_ext2_features,
                            &plus_ext3_features);

    featurestr = strtok(NULL, ",");

    while (featurestr) {
        char *val;
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, &plus_features,
                                    &plus_ext_features, &plus_ext2_features,
                                    &plus_ext3_features);
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, &minus_features,
                                    &minus_ext_features, &minus_ext2_features,
                                    &minus_ext3_features);
        } else if ((val = strchr(featurestr, '='))) {
            *val = 0; val++;
            if (!strcmp(featurestr, "family")) {
                char *err;
                family = strtol(val, &err, 10);
                if (!*val || *err || family < 0) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->family = family;
            } else if (!strcmp(featurestr, "model")) {
                char *err;
                model = strtol(val, &err, 10);
                if (!*val || *err || model < 0 || model > 0xff) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->model = model;
            } else if (!strcmp(featurestr, "stepping")) {
                char *err;
                stepping = strtol(val, &err, 10);
                if (!*val || *err || stepping < 0 || stepping > 0xf) {
                    fprintf(stderr, "bad numerical value %s\n", val);
                    goto error;
                }
                x86_cpu_def->stepping = stepping;
            } else if (!strcmp(featurestr, "vendor")) {
                if (strlen(val) != 12) {
                    fprintf(stderr, "vendor string must be 12 chars long\n");
                    goto error;
                }
                x86_cpu_def->vendor1 = 0;
                x86_cpu_def->vendor2 = 0;
                x86_cpu_def->vendor3 = 0;
                for(i = 0; i < 4; i++) {
                    x86_cpu_def->vendor1 |= ((uint8_t)val[i    ]) << (8 * i);
                    x86_cpu_def->vendor2 |= ((uint8_t)val[i + 4]) << (8 * i);
                    x86_cpu_def->vendor3 |= ((uint8_t)val[i + 8]) << (8 * i);
                }
                x86_cpu_def->vendor_override = 1;
            } else if (!strcmp(featurestr, "model_id")) {
                pstrcpy(x86_cpu_def->model_id, sizeof(x86_cpu_def->model_id),
                        val);
            } else {
                fprintf(stderr, "unrecognized feature %s\n", featurestr);
                goto error;
            }
        } else {
            fprintf(stderr, "feature string `%s' not in format "
                    "(+feature|-feature|feature=xyz)\n", featurestr);
            goto error;
        }
        featurestr = strtok(NULL, ",");
    }
    x86_cpu_def->features |= plus_features;
    x86_cpu_def->ext_features |= plus_ext_features;
    x86_cpu_def->ext2_features |= plus_ext2_features;
    x86_cpu_def->ext3_features |= plus_ext3_features;
    x86_cpu_def->features &= ~minus_features;
    x86_cpu_def->ext_features &= ~minus_ext_features;
    x86_cpu_def->ext2_features &= ~minus_ext2_features;
    x86_cpu_def->ext3_features &= ~minus_ext3_features;
    free(s);
    return 0;

error:
    free(s);
    return -1;
}
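
/*
 * Usage sketch (illustrative): the cpu_model string parsed above is the
 * value of the "-cpu" command-line option, for example
 *
 *   "qemu64"                      plain built-in model
 *   "qemu64,+popcnt,-nx"          set/clear individual feature bits
 *   "pentium3,family=6,model=7"   override family/model/stepping
 *   "host"                        mirror the host CPUID via
 *                                 cpu_x86_fill_host()
 *
 * The "+"/"-" bitmaps are accumulated first and applied wholesale at
 * the end (plus first, then minus), so a flag that is both added and
 * removed ends up cleared regardless of order.
 */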

void x86_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(x86_defs); i++)
        (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
}

static int cpu_x86_register(CPUX86State *env, const char *cpu_model)
{
    x86_def_t def1, *def = &def1;

    if (cpu_x86_find_by_name(def, cpu_model) < 0)
        return -1;
    if (def->vendor1) {
        env->cpuid_vendor1 = def->vendor1;
        env->cpuid_vendor2 = def->vendor2;
        env->cpuid_vendor3 = def->vendor3;
    } else {
        env->cpuid_vendor1 = CPUID_VENDOR_INTEL_1;
        env->cpuid_vendor2 = CPUID_VENDOR_INTEL_2;
        env->cpuid_vendor3 = CPUID_VENDOR_INTEL_3;
    }
    env->cpuid_vendor_override = def->vendor_override;
    env->cpuid_level = def->level;
    if (def->family > 0x0f)
        env->cpuid_version = 0xf00 | ((def->family - 0x0f) << 20);
    else
        env->cpuid_version = def->family << 8;
    env->cpuid_version |= ((def->model & 0xf) << 4) | ((def->model >> 4) << 16);
    env->cpuid_version |= def->stepping;
    env->cpuid_features = def->features;
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = def->ext_features;
    env->cpuid_ext2_features = def->ext2_features;
    env->cpuid_xlevel = def->xlevel;
    env->cpuid_ext3_features = def->ext3_features;
    {
        const char *model_id = def->model_id;
        int c, len, i;
        if (!model_id)
            model_id = "";
        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = (uint8_t)model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
    return 0;
}
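
/*
 * Worked example (illustrative): the "phenom" definition has family 16,
 * model 2, stepping 3.  Since 16 > 0x0f the extended-family encoding
 * above applies:
 *
 *   cpuid_version  = 0xf00 | ((16 - 0x0f) << 20)          = 0x100f00
 *   cpuid_version |= ((2 & 0xf) << 4) | ((2 >> 4) << 16)  (adds 0x020)
 *   cpuid_version |= 3                                    (adds 0x003)
 *
 * giving 0x100f23, which a guest decodes back to family 0x10, model 2,
 * stepping 3.
 */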

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
    }

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0; i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
}

void cpu_x86_close(CPUX86State *env)
{
    qemu_free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

static void
cpu_x86_dump_seg_cache(CPUState *env, FILE *f,
                       int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                       const char *name, struct SegmentCache *sc)
{
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f, "%-3s=%04x %016" PRIx64 " %08x %08x", name,
                    sc->selector, sc->base, sc->limit, sc->flags);
    } else
#endif
    {
        cpu_fprintf(f, "%-3s=%04x %08x %08x %08x", name, sc->selector,
                    (uint32_t)sc->base, sc->limit, sc->flags);
    }

    if (!(env->hflags & HF_PE_MASK) || !(sc->flags & DESC_P_MASK))
        goto done;

    cpu_fprintf(f, " DPL=%d ", (sc->flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT);
    if (sc->flags & DESC_S_MASK) {
        if (sc->flags & DESC_CS_MASK) {
            cpu_fprintf(f, (sc->flags & DESC_L_MASK) ? "CS64" :
                        ((sc->flags & DESC_B_MASK) ? "CS32" : "CS16"));
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_C_MASK) ? 'C' : '-',
                        (sc->flags & DESC_R_MASK) ? 'R' : '-');
        } else {
            cpu_fprintf(f, (sc->flags & DESC_B_MASK) ? "DS  " : "DS16");
            cpu_fprintf(f, " [%c%c", (sc->flags & DESC_E_MASK) ? 'E' : '-',
                        (sc->flags & DESC_W_MASK) ? 'W' : '-');
        }
        cpu_fprintf(f, "%c]", (sc->flags & DESC_A_MASK) ? 'A' : '-');
    } else {
        static const char *sys_type_name[2][16] = {
            { /* 32 bit mode */
                "Reserved", "TSS16-avl", "LDT", "TSS16-busy",
                "CallGate16", "TaskGate", "IntGate16", "TrapGate16",
                "Reserved", "TSS32-avl", "Reserved", "TSS32-busy",
                "CallGate32", "Reserved", "IntGate32", "TrapGate32"
            },
            { /* 64 bit mode */
                "<hiword>", "Reserved", "LDT", "Reserved", "Reserved",
                "Reserved", "Reserved", "Reserved", "Reserved",
                "TSS64-avl", "Reserved", "TSS64-busy", "CallGate64",
                "Reserved", "IntGate64", "TrapGate64"
            }
        };
        cpu_fprintf(f, sys_type_name[(env->hflags & HF_LMA_MASK) ? 1 : 0]
                                    [(sc->flags & DESC_TYPE_MASK)
                                     >> DESC_TYPE_SHIFT]);
    }
done:
    cpu_fprintf(f, "\n");
}

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    if (kvm_enabled())
        kvm_arch_get_registers(env);

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (int)(env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    env->halted);
    }

    for(i = 0; i < 6; i++) {
        cpu_x86_dump_seg_cache(env, f, cpu_fprintf, seg_name[i],
                               &env->segs[i]);
    }
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "LDT", &env->ldt);
    cpu_x86_dump_seg_cache(env, f, cpu_fprintf, "TR", &env->tr);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_fprintf(f, "GDT= %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%016" PRIx64 " ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%016" PRIx64 " DR7=%016" PRIx64 "\n",
                    env->dr[6], env->dr[7]);
    } else
#endif
    {
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
        for(i = 0; i < 4; i++)
            cpu_fprintf(f, "DR%d=%08x ", i, env->dr[i]);
        cpu_fprintf(f, "\nDR6=%08x DR7=%08x\n", env->dr[6], env->dr[7]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i = 0; i < 8; i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = (~0x100000) | (a20_state << 20);
    }
}
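
/*
 * Worked example (illustrative): with A20 disabled (a20_state == 0) the
 * mask computed above is ~0x100000 | 0 = 0xffefffff, so bit 20 of every
 * address is forced to zero and an access to 0x100000 wraps to 0, the
 * historical 8086 behaviour.  With A20 enabled the mask is all ones and
 * addresses pass through unchanged.
 */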

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int mmu_idx, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

/* XXX: This value should match the one returned by CPUID
 * and in exec.c */
#if defined(CONFIG_KQEMU)
#define PHYS_ADDR_MASK 0xfffff000LL
#else
# if defined(TARGET_X86_64)
#  define PHYS_ADDR_MASK 0xfffffff000LL
# else
#  define PHYS_ADDR_MASK 0xffffff000LL
# endif
#endif

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    target_ulong pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;
        target_ulong pdpe_addr;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
do_fault_protect:
    error_code = PG_ERROR_P_MASK;
do_fault:
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
        /* cr2 is not modified in case of exceptions */
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                 addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}
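
/*
 * Worked example (illustrative): in 64-bit mode the walk above slices
 * the virtual address into 9-bit table indices plus a 12-bit page
 * offset:
 *
 *   PML4 index = (addr >> 39) & 0x1ff
 *   PDPT index = (addr >> 30) & 0x1ff
 *   PD   index = (addr >> 21) & 0x1ff
 *   PT   index = (addr >> 12) & 0x1ff
 *
 * For a hypothetical addr = 0x00007f1234567000 these come out as 0xfe,
 * 0x048, 0x1a2 and 0x167; each index is scaled by 8 (<< 3) because the
 * entries are 64 bits wide.
 */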

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    target_ulong pde_addr, pte_addr;
    uint64_t pte;
    target_phys_addr_t paddr;
    uint32_t page_offset;
    int page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        target_ulong pdpe_addr;
        uint64_t pde, pdpe;

#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint64_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldq_phys(pte_addr);
        }
        if (!(pte & PG_PRESENT_MASK))
            return -1;
    } else {
        uint32_t pde;

        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}

void hw_breakpoint_insert(CPUState *env, int index)
{
    int type, err = 0;

    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            err = cpu_breakpoint_insert(env, env->dr[index], BP_CPU,
                                        &env->cpu_breakpoint[index]);
        break;
    case 1:
        type = BP_CPU | BP_MEM_WRITE;
        goto insert_wp;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    case 3:
        type = BP_CPU | BP_MEM_ACCESS;
    insert_wp:
        err = cpu_watchpoint_insert(env, env->dr[index],
                                    hw_breakpoint_len(env->dr[7], index),
                                    type, &env->cpu_watchpoint[index]);
        break;
    }
    if (err)
        env->cpu_breakpoint[index] = NULL;
}

void hw_breakpoint_remove(CPUState *env, int index)
{
    if (!env->cpu_breakpoint[index])
        return;
    switch (hw_breakpoint_type(env->dr[7], index)) {
    case 0:
        if (hw_breakpoint_enabled(env->dr[7], index))
            cpu_breakpoint_remove_by_ref(env, env->cpu_breakpoint[index]);
        break;
    case 1:
    case 3:
        cpu_watchpoint_remove_by_ref(env, env->cpu_watchpoint[index]);
        break;
    case 2:
        /* No support for I/O watchpoints yet */
        break;
    }
}

int check_hw_breakpoints(CPUState *env, int force_dr6_update)
{
    target_ulong dr6;
    int reg, type;
    int hit_enabled = 0;

    dr6 = env->dr[6] & ~0xf;
    for (reg = 0; reg < 4; reg++) {
        type = hw_breakpoint_type(env->dr[7], reg);
        if ((type == 0 && env->dr[reg] == env->eip) ||
            ((type & 1) && env->cpu_watchpoint[reg] &&
             (env->cpu_watchpoint[reg]->flags & BP_WATCHPOINT_HIT))) {
            dr6 |= 1 << reg;
            if (hw_breakpoint_enabled(env->dr[7], reg))
                hit_enabled = 1;
        }
    }
    if (hit_enabled || force_dr6_update)
        env->dr[6] = dr6;
    return hit_enabled;
}
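
/*
 * Background note (illustrative): the type codes tested here and in
 * hw_breakpoint_insert() follow the architectural DR7 R/W encoding:
 * 0 = instruction execution, 1 = data write, 2 = I/O access
 * (unsupported here), 3 = data read/write.  The "(type & 1)" test
 * above therefore matches exactly the two watchpoint cases, 1 and 3.
 */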

static CPUDebugExcpHandler *prev_debug_excp_handler;

void raise_exception(int exception_index);

static void breakpoint_handler(CPUState *env)
{
    CPUBreakpoint *bp;

    if (env->watchpoint_hit) {
        if (env->watchpoint_hit->flags & BP_CPU) {
            env->watchpoint_hit = NULL;
            if (check_hw_breakpoints(env, 0))
                raise_exception(EXCP01_DB);
            else
                cpu_resume_from_signal(env, NULL);
        }
    } else {
        TAILQ_FOREACH(bp, &env->breakpoints, entry)
            if (bp->pc == env->eip) {
                if (bp->flags & BP_CPU) {
                    check_hw_breakpoints(env, 1);
                    raise_exception(EXCP01_DB);
                }
                break;
            }
    }
    if (prev_debug_excp_handler)
        prev_debug_excp_handler(env);
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

void cpu_inject_x86_mce(CPUState *cenv, int bank, uint64_t status,
                        uint64_t mcg_status, uint64_t addr, uint64_t misc)
{
    uint64_t mcg_cap = cenv->mcg_cap;
    unsigned bank_num = mcg_cap & 0xff;
    uint64_t *banks = cenv->mce_banks;

    if (bank >= bank_num || !(status & MCI_STATUS_VAL))
        return;

    /*
     * if MSR_MCG_CTL is not all 1s, the uncorrected error
     * reporting is disabled
     */
    if ((status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
        cenv->mcg_ctl != ~(uint64_t)0)
        return;
    banks += 4 * bank;
    /*
     * if MSR_MCi_CTL is not all 1s, the uncorrected error
     * reporting is disabled for the bank
     */
    if ((status & MCI_STATUS_UC) && banks[0] != ~(uint64_t)0)
        return;
    if (status & MCI_STATUS_UC) {
        if ((cenv->mcg_status & MCG_STATUS_MCIP) ||
            !(cenv->cr[4] & CR4_MCE_MASK)) {
            fprintf(stderr, "attempt to inject an MCE exception while a "
                    "previous one is still in progress!\n");
            qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
            qemu_system_reset_request();
            return;
        }
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        cenv->mcg_status = mcg_status;
        banks[1] = status;
        cpu_interrupt(cenv, CPU_INTERRUPT_MCE);
    } else if (!(banks[1] & MCI_STATUS_VAL)
               || !(banks[1] & MCI_STATUS_UC)) {
        if (banks[1] & MCI_STATUS_VAL)
            status |= MCI_STATUS_OVER;
        banks[2] = addr;
        banks[3] = misc;
        banks[1] = status;
    } else
        banks[1] |= MCI_STATUS_OVER;
}
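
/*
 * Usage sketch (illustrative, hypothetical values): injecting an
 * uncorrected error into bank 0 with no address/misc payload might
 * look like
 *
 *   cpu_inject_x86_mce(env, 0, MCI_STATUS_VAL | MCI_STATUS_UC, 0, 0, 0);
 *
 * which takes the first branch above and raises CPU_INTERRUPT_MCE,
 * provided CR4.MCE is set and no MCE is already in progress.
 */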
#endif /* !CONFIG_USER_ONLY */

static void mce_init(CPUX86State *cenv)
{
    unsigned int bank, bank_num;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->cpuid_features & (CPUID_MCE | CPUID_MCA))
           == (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
        cenv->mcg_ctl = ~(uint64_t)0;
        bank_num = cenv->mcg_cap & 0xff;
        cenv->mce_banks = qemu_mallocz(bank_num * sizeof(uint64_t) * 4);
        for (bank = 0; bank < bank_num; bank++)
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
    }
}

static void host_cpuid(uint32_t function, uint32_t count,
                       uint32_t *eax, uint32_t *ebx,
                       uint32_t *ecx, uint32_t *edx)
{
#if defined(CONFIG_KVM)
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#else
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
#endif
}

void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;

        /* sysenter isn't supported in compatibility mode on AMD, and
         * syscall isn't supported in compatibility mode on Intel, so
         * advertise the actual CPU, and say goodbye to migration between
         * different vendors if you use compatibility mode. */
        if (kvm_enabled() && !env->cpuid_vendor_override)
            host_cpuid(0, 0, NULL, ebx, ecx, edx);
        break;
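    /*
     * Worked example (illustrative): the vendor registers hold the
     * 12-byte vendor string little-endian across EBX/EDX/ECX.  For
     * "GenuineIntel": EBX = 0x756e6547 ("Genu"), EDX = 0x49656e69
     * ("ineI"), ECX = 0x6c65746e ("ntel").
     */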
    case 1:
        *eax = env->cpuid_version;
        *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->cpuid_ext_features;
        *edx = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        *eax = 1;
        *ebx = 0;
        *ecx = 0;
        *edx = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        switch (count) {
        case 0: /* L1 dcache info */
            *eax = 0x0000121;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 1: /* L1 icache info */
            *eax = 0x0000122;
            *ebx = 0x1c0003f;
            *ecx = 0x000003f;
            *edx = 0x0000001;
            break;
        case 2: /* L2 cache info */
            *eax = 0x0000143;
            *ebx = 0x3c0003f;
            *ecx = 0x0000fff;
            *edx = 0x0000001;
            break;
        default: /* end of info */
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
            break;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->cpuid_ext3_features;
        *edx = env->cpuid_ext2_features;

        if (kvm_enabled()) {
            /* Nested SVM not yet supported in KVM */
            *ecx &= ~CPUID_EXT3_SVM;
        } else {
            /* AMD 3DNow! is not supported in QEMU */
            *edx &= ~(CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT);
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        *eax = 0x01ff01ff;
        *ebx = 0x01ff01ff;
        *ecx = 0x40020140;
        *edx = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        *eax = 0;
        *ebx = 0x42004200;
        *ecx = 0x02008140;
        *edx = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
            /* 64 bit processor */
#if defined(CONFIG_KQEMU)
            *eax = 0x00003020; /* 48 bits virtual, 32 bits physical */
#else
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
#endif
        } else {
#if defined(CONFIG_KQEMU)
            *eax = 0x00000020; /* 32 bits physical */
#else
            if (env->cpuid_features & CPUID_PSE36)
                *eax = 0x00000024; /* 36 bits physical */
            else
                *eax = 0x00000020; /* 32 bits physical */
#endif
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x8000000A:
        *eax = 0x00000001; /* SVM Revision */
        *ebx = 0x00000010; /* nr of ASIDs */
        *ecx = 0;
        *edx = 0; /* optional features */
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}


int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector,
                            target_ulong *base, unsigned int *limit,
                            unsigned int *flags)
{
    SegmentCache *dt;
    target_ulong ptr;
    uint32_t e1, e2;
    int index;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    ptr = dt->base + index;
    if ((index + 7) > dt->limit
        || cpu_memory_rw_debug(env, ptr, (uint8_t *)&e1, sizeof(e1), 0) != 0
        || cpu_memory_rw_debug(env, ptr+4, (uint8_t *)&e2, sizeof(e2), 0) != 0)
        return 0;

    *base = ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
    *limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        *limit = (*limit << 12) | 0xfff;
    *flags = e2;

    return 1;
}
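
/*
 * Worked example (illustrative): a flat 32-bit code descriptor with
 * raw words e1 = 0x0000ffff (low) and e2 = 0x00cf9a00 (high) decodes
 * above to base = 0x00000000 and limit = 0xfffff; since the
 * granularity bit (DESC_G_MASK) is set, the limit is scaled to
 * (0xfffff << 12) | 0xfff = 0xffffffff.
 */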

CPUX86State *cpu_x86_init(const char *cpu_model)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    cpu_exec_init(env);
    env->cpu_model_str = cpu_model;

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
#ifndef CONFIG_USER_ONLY
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
#endif
    }
    if (cpu_x86_register(env, cpu_model) < 0) {
        cpu_x86_close(env);
        return NULL;
    }
    mce_init(env);
    cpu_reset(env);
#ifdef CONFIG_KQEMU
    kqemu_init(env);
#endif

    qemu_init_vcpu(env);

    return env;
}

#if !defined(CONFIG_USER_ONLY)
void do_cpu_init(CPUState *env)
{
    int sipi = env->interrupt_request & CPU_INTERRUPT_SIPI;
    cpu_reset(env);
    env->interrupt_request = sipi;
    apic_init_reset(env);
}

void do_cpu_sipi(CPUState *env)
{
    apic_sipi(env);
}
#else
void do_cpu_init(CPUState *env)
{
}
void do_cpu_sipi(CPUState *env)
{
}
#endif