/*
 * mirror_qemu.git: target-i386/helper2.c
 * (commit: "Handle cpu_model in copy_cpu()", by Kirill A. Shutemov)
 */
1 /*
2 * i386 helpers (without register variable usage)
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include <stdarg.h>
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <string.h>
24 #include <inttypes.h>
25 #include <signal.h>
26 #include <assert.h>
27
28 #include "cpu.h"
29 #include "exec-all.h"
30 #include "svm.h"
31
32 //#define DEBUG_MMU
33
34 static int cpu_x86_register (CPUX86State *env, const char *cpu_model);
35
/* Look up "flagname" in the four CPUID feature-name tables and set the
 * corresponding bit in the matching output word (standard EDX, standard
 * ECX, extended EDX, extended ECX, tried in that order).  Feature flag
 * names are taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification"; in cases of disagreement
 * about feature names, the Linux name is used.  An unknown name is
 * reported on stderr and otherwise ignored.
 */
static void add_flagname_to_bitmaps(const char *flagname, uint32_t *features,
                                    uint32_t *ext_features,
                                    uint32_t *ext2_features,
                                    uint32_t *ext3_features)
{
    int i;
    /* CPUID.1:EDX */
    static const char *feature_name[32] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
        "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
        NULL, "ds" /* Intel dts */, "acpi", "mmx",
        "fxsr", "sse", "sse2", "ss", "ht" /* Intel htt */, "tm", "ia64", "pbe",
    };
    /* CPUID.1:ECX */
    static const char *ext_feature_name[32] = {
        "pni" /* Intel,AMD sse3 */, NULL, NULL, "monitor",
        "ds_cpl", "vmx", NULL /* Linux smx */, "est",
        "tm2", "ssse3", "cid", NULL, NULL, "cx16", "xtpr", NULL,
        NULL, NULL, "dca", NULL, NULL, NULL, NULL, "popcnt",
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };
    /* CPUID.8000_0001:EDX */
    static const char *ext2_feature_name[32] = {
        "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
        "cx8" /* AMD CMPXCHG8B */, "apic", NULL, "syscall",
        "mtrr" /* bug fix: was misspelled "mttr" */, "pge", "mca", "cmov",
        "pat", "pse36", NULL, NULL /* Linux mp */, "nx" /* Intel xd */,
        NULL, "mmxext", "mmx",
        "fxsr", "fxsr_opt" /* AMD ffxsr */, "pdpe1gb" /* AMD Page1GB */,
        "rdtscp", NULL, "lm" /* Intel 64 */, "3dnowext", "3dnow",
    };
    /* CPUID.8000_0001:ECX */
    static const char *ext3_feature_name[32] = {
        "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm",
        "extapic" /* AMD ExtApicSpace */, "cr8legacy" /* AMD AltMovCr8 */,
        "abm", "sse4a", "misalignsse",
        "3dnowprefetch", "osvw", NULL /* Linux ibs */, NULL,
        "skinit", "wdt", NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

    for (i = 0; i < 32; i++)
        if (feature_name[i] && !strcmp(flagname, feature_name[i])) {
            *features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext_feature_name[i] && !strcmp(flagname, ext_feature_name[i])) {
            *ext_features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext2_feature_name[i] && !strcmp(flagname, ext2_feature_name[i])) {
            *ext2_features |= 1 << i;
            return;
        }
    for (i = 0; i < 32; i++)
        if (ext3_feature_name[i] && !strcmp(flagname, ext3_feature_name[i])) {
            *ext3_features |= 1 << i;
            return;
        }
    fprintf(stderr, "CPU feature %s not found\n", flagname);
}
92
93 CPUX86State *cpu_x86_init(const char *cpu_model)
94 {
95 CPUX86State *env;
96 static int inited;
97
98 env = qemu_mallocz(sizeof(CPUX86State));
99 if (!env)
100 return NULL;
101 cpu_exec_init(env);
102 env->cpu_model_str = cpu_model;
103
104 /* init various static tables */
105 if (!inited) {
106 inited = 1;
107 optimize_flags_init();
108 }
109 if (cpu_x86_register(env, cpu_model) < 0) {
110 cpu_x86_close(env);
111 return NULL;
112 }
113 cpu_reset(env);
114 #ifdef USE_KQEMU
115 kqemu_init(env);
116 #endif
117 return env;
118 }
119
/* Static description of a CPU model selectable via -cpu. */
typedef struct x86_def_t {
    const char *name;                    /* model name as given on the command line */
    uint32_t vendor1, vendor2, vendor3;  /* CPUID.0 vendor string words; all zero selects
                                            the default "GenuineIntel" (see cpu_x86_register) */
    int family;                          /* packed as (family << 8) into cpuid_version */
    int model;                           /* packed as (model << 4) into cpuid_version */
    int stepping;                        /* low 4 bits of cpuid_version */
    uint32_t features, ext_features, ext2_features, ext3_features;  /* CPUID feature words:
                                            1:EDX, 1:ECX, 8000_0001:EDX, 8000_0001:ECX */
    uint32_t xlevel;                     /* highest supported extended CPUID leaf */
} x86_def_t;
129
130 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
131 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
132 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
133 CPUID_PAE | CPUID_SEP | CPUID_APIC)
/* Built-in CPU model table; add entries here to provide new -cpu choices. */
static x86_def_t x86_defs[] = {
#ifdef TARGET_X86_64
    {
        .name = "qemu64",
        /* reports an AMD vendor so guests use the AMD64 feature path */
        .vendor1 = 0x68747541, /* "Auth" */
        .vendor2 = 0x69746e65, /* "enti" */
        .vendor3 = 0x444d4163, /* "cAMD" */
        .family = 6,
        .model = 2,
        .stepping = 3,
        .features = PPRO_FEATURES |
        /* these features are needed for Win64 and aren't fully implemented */
            CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
        /* this feature is needed for Solaris and isn't fully implemented */
            CPUID_PSE36,
        .ext_features = CPUID_EXT_SSE3,
        /* extended EDX mirrors the standard features that exist there too */
        .ext2_features = (PPRO_FEATURES & 0x0183F3FF) |
            CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
        .ext3_features = CPUID_EXT3_SVM,
        .xlevel = 0x80000008,
    },
#endif
    {
        .name = "qemu32",
        .family = 6,
        .model = 3,
        .stepping = 3,
        .features = PPRO_FEATURES,
        .ext_features = CPUID_EXT_SSE3,
        .xlevel = 0,    /* no extended CPUID leaves */
    },
    {
        .name = "486",
        .family = 4,
        .model = 0,
        .stepping = 0,
        .features = 0x0000000B,  /* FPU | VME | PSE */
        .xlevel = 0,
    },
    {
        .name = "pentium",
        .family = 5,
        .model = 4,
        .stepping = 3,
        .features = 0x008001BF,  /* 486 set + DE/TSC/MSR/MCE/CX8 + MMX */
        .xlevel = 0,
    },
    {
        .name = "pentium2",
        .family = 6,
        .model = 5,
        .stepping = 2,
        .features = 0x0183F9FF,  /* P6-class feature set incl. MMX/FXSR */
        .xlevel = 0,
    },
    {
        .name = "pentium3",
        .family = 6,
        .model = 7,
        .stepping = 3,
        .features = 0x0383F9FF,  /* pentium2 set + SSE */
        .xlevel = 0,
    },
};
198
199 static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
200 {
201 unsigned int i;
202 x86_def_t *def;
203
204 char *s = strdup(cpu_model);
205 char *featurestr, *name = strtok(s, ",");
206 uint32_t plus_features = 0, plus_ext_features = 0, plus_ext2_features = 0, plus_ext3_features = 0;
207 uint32_t minus_features = 0, minus_ext_features = 0, minus_ext2_features = 0, minus_ext3_features = 0;
208 int family = -1, model = -1, stepping = -1;
209
210 def = NULL;
211 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++) {
212 if (strcmp(name, x86_defs[i].name) == 0) {
213 def = &x86_defs[i];
214 break;
215 }
216 }
217 if (!def)
218 goto error;
219 memcpy(x86_cpu_def, def, sizeof(*def));
220
221 featurestr = strtok(NULL, ",");
222
223 while (featurestr) {
224 char *val;
225 if (featurestr[0] == '+') {
226 add_flagname_to_bitmaps(featurestr + 1, &plus_features, &plus_ext_features, &plus_ext2_features, &plus_ext3_features);
227 } else if (featurestr[0] == '-') {
228 add_flagname_to_bitmaps(featurestr + 1, &minus_features, &minus_ext_features, &minus_ext2_features, &minus_ext3_features);
229 } else if ((val = strchr(featurestr, '='))) {
230 *val = 0; val++;
231 if (!strcmp(featurestr, "family")) {
232 char *err;
233 family = strtol(val, &err, 10);
234 if (!*val || *err || family < 0) {
235 fprintf(stderr, "bad numerical value %s\n", val);
236 x86_cpu_def = 0;
237 goto error;
238 }
239 x86_cpu_def->family = family;
240 } else if (!strcmp(featurestr, "model")) {
241 char *err;
242 model = strtol(val, &err, 10);
243 if (!*val || *err || model < 0 || model > 0xf) {
244 fprintf(stderr, "bad numerical value %s\n", val);
245 x86_cpu_def = 0;
246 goto error;
247 }
248 x86_cpu_def->model = model;
249 } else if (!strcmp(featurestr, "stepping")) {
250 char *err;
251 stepping = strtol(val, &err, 10);
252 if (!*val || *err || stepping < 0 || stepping > 0xf) {
253 fprintf(stderr, "bad numerical value %s\n", val);
254 x86_cpu_def = 0;
255 goto error;
256 }
257 x86_cpu_def->stepping = stepping;
258 } else {
259 fprintf(stderr, "unregnized feature %s\n", featurestr);
260 x86_cpu_def = 0;
261 goto error;
262 }
263 } else {
264 fprintf(stderr, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr);
265 x86_cpu_def = 0;
266 goto error;
267 }
268 featurestr = strtok(NULL, ",");
269 }
270 x86_cpu_def->features |= plus_features;
271 x86_cpu_def->ext_features |= plus_ext_features;
272 x86_cpu_def->ext2_features |= plus_ext2_features;
273 x86_cpu_def->ext3_features |= plus_ext3_features;
274 x86_cpu_def->features &= ~minus_features;
275 x86_cpu_def->ext_features &= ~minus_ext_features;
276 x86_cpu_def->ext2_features &= ~minus_ext2_features;
277 x86_cpu_def->ext3_features &= ~minus_ext3_features;
278 free(s);
279 return 0;
280
281 error:
282 free(s);
283 return -1;
284 }
285
286 void x86_cpu_list (FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
287 {
288 unsigned int i;
289
290 for (i = 0; i < sizeof(x86_defs) / sizeof(x86_def_t); i++)
291 (*cpu_fprintf)(f, "x86 %16s\n", x86_defs[i].name);
292 }
293
294 static int cpu_x86_register (CPUX86State *env, const char *cpu_model)
295 {
296 x86_def_t def1, *def = &def1;
297
298 if (cpu_x86_find_by_name(def, cpu_model) < 0)
299 return -1;
300 if (def->vendor1) {
301 env->cpuid_vendor1 = def->vendor1;
302 env->cpuid_vendor2 = def->vendor2;
303 env->cpuid_vendor3 = def->vendor3;
304 } else {
305 env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
306 env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
307 env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
308 }
309 env->cpuid_level = 2;
310 env->cpuid_version = (def->family << 8) | (def->model << 4) | def->stepping;
311 env->cpuid_features = def->features;
312 env->pat = 0x0007040600070406ULL;
313 env->cpuid_ext_features = def->ext_features;
314 env->cpuid_ext2_features = def->ext2_features;
315 env->cpuid_xlevel = def->xlevel;
316 env->cpuid_ext3_features = def->ext3_features;
317 {
318 const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
319 int c, len, i;
320 len = strlen(model_id);
321 for(i = 0; i < 48; i++) {
322 if (i >= len)
323 c = '\0';
324 else
325 c = model_id[i];
326 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
327 }
328 }
329 return 0;
330 }
331
/* Bring the CPU to its architectural power-on/reset state.
   NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    /* zero everything up to (but excluding) the breakpoint state, which
       must survive a reset */
    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags |= HF_GIF_MASK;    /* global interrupt flag set (SVM) */

    /* CR0 = CD | NW | ET: caches off, real mode, paging off */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;    /* A20 gate open */
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    /* reset vector: CS base 0xffff0000 with IP 0xfff0 -> 0xfffffff0 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;    /* EDX holds CPU id at reset */

    env->eflags = 0x2;    /* only the always-1 reserved bit set */

    /* FPU init: all stack slots tagged empty */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;    /* default FPU control word */

    env->mxcsr = 0x1f80;  /* default MXCSR: all exceptions masked */
}
380
/* Release a CPU state allocated by cpu_x86_init().
   NOTE(review): frees only the CPUX86State itself; registration done by
   cpu_exec_init() is not unwound here — confirm callers expect that. */
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
385
386 /***********************************************************/
387 /* x86 debug */
388
/* Printable names of the lazy condition-code evaluation states, indexed
   by env->cc_op (see cpu_dump_state below).
   NOTE(review): order must stay in sync with the CC_OP_* enumeration in
   cpu.h — verify there when adding or removing entries. */
static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};
443
/* Dump the architectural CPU state to "f" via the fprintf-like callback.
   "flags" selects optional sections: X86_DUMP_CCOP adds the lazy
   condition-code state, X86_DUMP_FPU adds FPU and SSE registers. */
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        /* 64-bit code segment: print the full 64-bit register file */
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else
#endif
    {
        /* 32-bit (or 16-bit) mode: print the low 32 bits only */
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode active: segment/table bases are 64-bit */
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        /* lazy condition-code state; out-of-range cc_op printed numerically */
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        /* rebuild the packed tag word: bit set == register valid */
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            /* split the 80-bit extended value into mantissa + sign/exponent */
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        /* 16 XMM registers in 64-bit mode, 8 otherwise */
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}
650
651 /***********************************************************/
652 /* x86 mmu */
653 /* XXX: add PGE support */
654
655 void cpu_x86_set_a20(CPUX86State *env, int a20_state)
656 {
657 a20_state = (a20_state != 0);
658 if (a20_state != ((env->a20_mask >> 20) & 1)) {
659 #if defined(DEBUG_MMU)
660 printf("A20 update: a20=%d\n", a20_state);
661 #endif
662 /* if the cpu is currently executing code, we must unlink it and
663 all the potentially executing TB */
664 cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
665
666 /* when a20 is changed, all the MMU mappings are invalid, so
667 we must flush everything */
668 tlb_flush(env, 1);
669 env->a20_mask = 0xffefffff | (a20_state << 20);
670 }
671 }
672
/* Install a new CR0 value, flushing the TLB when paging-related bits
   change and handling long-mode entry/exit on x86-64.  Also refreshes
   the derived PE/ADDSEG/MP/EM/TS bits in env->hflags. */
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    /* any change to PG/WP/PE invalidates all cached translations */
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    /* ET is always forced to 1 */
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags: mirror the MP/EM/TS bits of CR0 into hflags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
713
714 /* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
715 the PDPT */
716 void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
717 {
718 env->cr[3] = new_cr3;
719 if (env->cr[0] & CR0_PG_MASK) {
720 #if defined(DEBUG_MMU)
721 printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
722 #endif
723 tlb_flush(env, 0);
724 }
725 }
726
727 void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
728 {
729 #if defined(DEBUG_MMU)
730 printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
731 #endif
732 if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
733 (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
734 tlb_flush(env, 1);
735 }
736 /* SSE handling */
737 if (!(env->cpuid_features & CPUID_SSE))
738 new_cr4 &= ~CR4_OSFXSR_MASK;
739 if (new_cr4 & CR4_OSFXSR_MASK)
740 env->hflags |= HF_OSFXSR_MASK;
741 else
742 env->hflags &= ~HF_OSFXSR_MASK;
743
744 env->cr[4] = new_cr4;
745 }
746
/* Invalidate the TLB entry covering "addr".
   XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
752
753 #if defined(CONFIG_USER_ONLY)
754
755 int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
756 int is_write, int mmu_idx, int is_softmmu)
757 {
758 /* user mode only emulation */
759 is_write &= 1;
760 env->cr[2] = addr;
761 env->error_code = (is_write << PG_ERROR_W_BIT);
762 env->error_code |= PG_ERROR_U_MASK;
763 env->exception_index = EXCP0E_PAGE;
764 return 1;
765 }
766
/* User-mode emulation: virtual addresses map 1:1 to "physical" ones. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
771
772 #else
773
774 #define PHYS_ADDR_MASK 0xfffff000
775
/* Walk the page tables for "addr" and install the translation in the
   TLB, updating accessed/dirty bits along the way.  Handles the three
   paging flavours: none, legacy 32-bit 2-level, and PAE (including
   x86-64 4-level long mode).
   "is_write1" is 0 for reads, 1 for writes, 2 for instruction fetches.
   return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int mmu_idx, int is_softmmu)
{
    uint64_t ptep, pte;
    uint32_t pdpe_addr, pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

    is_user = mmu_idx == MMU_USER_IDX;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        /* paging disabled: identity mapping with full access */
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;

        /* PAE paging: 64-bit entries.  "ptep" accumulates the combined
           access rights of all levels (NX is tracked inverted via XOR).
           XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr;
            uint64_t pml4e;
            int32_t sext;

            /* test virtual address sign extension (canonical form);
               non-canonical addresses raise #GP, not #PF */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* NX set while EFER.NXE is clear is a reserved-bit violation */
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT, entries have no access bits */
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            /* instruction fetch from a no-execute page */
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                /* supervisor writes only honour RW when CR0.WP is set */
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* legacy 32-bit 2-level paging */
        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    /* build the #PF error code from the access type and CPL */
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    /* under SVM, an intercepted #PF reports the address in the VMCB
       instead of CR2 */
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE)) {
        stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), addr);
    } else {
        env->cr[2] = addr;
    }
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    /* the VMM will handle this */
    if (INTERCEPTEDl(_exceptions, 1 << EXCP0E_PAGE))
        return 2;
    return 1;
}
1072
/* Translate "addr" by walking the page tables without side effects (no
   accessed/dirty updates, no faults) and return the physical address,
   or -1 if the page is not present.  Used for debugger accesses.
   NOTE(review): reads all entries with ldl_phys, so only the low 32
   bits of 64-bit PAE/long-mode entries are seen — consistent with the
   32-bit physical address limitation of the main walker above. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;  /* intentionally shadow the outer decls */
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            /* legacy PAE: 4-entry PDPT */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            /* paging disabled: identity mapping */
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
1160 #endif /* !CONFIG_USER_ONLY */