/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU

#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

int modify_ldt(int func, void *ptr, unsigned long bytecount)
{
    return syscall(__NR_modify_ldt, func, ptr, bytecount);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */

CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
#endif
        env->cpuid_level = 2;
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                               CPUID_PAT);
        env->pat = 0x0007040600070406ULL;
        env->cpuid_ext_features = CPUID_EXT_SSE3;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
        env->cpuid_features |= CPUID_APIC;
        env->cpuid_xlevel = 0;
        {
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
            int c, len, i;
            len = strlen(model_id);
            for(i = 0; i < 48; i++) {
                if (i >= len)
                    c = '\0';
                else
                    c = model_id[i];
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
            }
        }
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX;
        env->cpuid_xlevel = 0x80000008;

        /* these features are needed for Win64 and aren't fully implemented */
        env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
#endif
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}

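/* Editorial sketch, not part of the original file: the vendor and model-id
 * strings set up in cpu_x86_init() are packed little-endian, four ASCII
 * bytes per 32-bit word (0x756e6547 is "Genu", 0x49656e69 is "ineI", ...).
 * The hypothetical helper below is simply the inverse of the packing loop
 * above, turning env->cpuid_model back into a readable string. */
static void unpack_cpuid_model(const uint32_t *words, char buf[49])
{
    int i;

    for (i = 0; i < 48; i++)
        buf[i] = (words[i >> 2] >> (8 * (i & 3))) & 0xff;
    buf[48] = '\0';
}
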
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0; i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}

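/* Editorial sketch, not part of the original file: with the segment cache
 * and EIP values loaded by cpu_reset() above, the first code fetch after
 * reset happens at CS.base + EIP = 0xffff0000 + 0xfff0 = 0xfffffff0, the
 * usual x86 reset vector just below 4 GB.  Hypothetical helper: */
static target_ulong x86_reset_fetch_addr(CPUX86State *env)
{
    return env->segs[R_CS].base + env->eip;
}
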
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i = 0; i < 8; i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

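/* Editorial sketch, not part of the original file: env->a20_mask is ANDed
 * into every physical address used by the MMU code below, so with A20
 * disabled (mask 0xffefffff) an access to 0x00100000 aliases 0x00000000,
 * reproducing the classic 1 MB wrap-around.  Hypothetical helper: */
static uint32_t a20_apply(CPUX86State *env, uint32_t paddr)
{
    return paddr & env->a20_mask;
}
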
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

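/* Editorial sketch, not part of the original file: CR0.MP, CR0.EM and
 * CR0.TS are CR0 bits 1..3, and the HF_MP/HF_EM/HF_TS hflags bits are laid
 * out consecutively starting at HF_MP_SHIFT, which is why the single shift
 * above maps all three at once.  Equivalent bit-by-bit form, assuming the
 * CR0_MP_MASK/CR0_EM_MASK/CR0_TS_MASK constants from cpu.h: */
static uint32_t cr0_fpu_bits_to_hflags(uint32_t cr0)
{
    uint32_t hf = 0;

    if (cr0 & CR0_MP_MASK)
        hf |= HF_MP_MASK;
    if (cr0 & CR0_EM_MASK)
        hf |= HF_EM_MASK;
    if (cr0 & CR0_TS_MASK)
        hf |= HF_TS_MASK;
    return hf;
}
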
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

#define PHYS_ADDR_MASK 0xfffff000

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int is_user, int is_softmmu)
{
    uint64_t ptep, pte;
    uint32_t pdpe_addr, pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr;
            uint64_t pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->cr[2] = addr;
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

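/* Editorial sketch, not part of the original file: in the legacy 2-level
 * 32-bit walk above, the PDE index is addr[31:22] and the PTE index is
 * addr[21:12]; the shifts used in the code are just those indices
 * pre-multiplied by the 4-byte entry size.  For example, for
 * addr = 0x00403004:
 *   PDE byte offset = (addr >> 20) & ~3    = 0x004  (PDE #1)
 *   PTE byte offset = (addr >> 10) & 0xffc = 0x00c  (PTE #3)
 * Hypothetical helper illustrating the arithmetic: */
static void walk_offsets_32(target_ulong addr,
                            uint32_t *pde_off, uint32_t *pte_off)
{
    *pde_off = (addr >> 20) & ~3;    /* == 4 * (addr >> 22) */
    *pte_off = (addr >> 10) & 0xffc; /* == 4 * ((addr >> 12) & 0x3ff) */
}
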
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */

#if defined(USE_CODE_COPY)
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    /* frstor only reads the image, so pass it as an input operand */
    asm volatile ("frstor %0" : : "m" (*fp));
    env->native_fp_regs = 1;
}

void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    /* fsave writes the image, so it must be an output operand */
    asm volatile ("fsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif
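
/* Editorial sketch, not part of the original file: the 16-bit x87 tag word
 * built in restore_native_fp_state() uses two bits per register
 * (00 = valid, 01 = zero, 10 = special, 11 = empty).  QEMU only tracks
 * "empty" in env->fptags[], so it writes 11 for empty slots and 00
 * otherwise, letting the FPU recompute the finer states.  Hypothetical
 * helper expanding a one-byte-per-register empty flag array into that
 * format: */
static uint16_t fptags_to_tag_word(const uint8_t fptags[8])
{
    uint16_t tag = 0;
    int i;

    for (i = 7; i >= 0; i--) {
        tag <<= 2;
        if (fptags[i])
            tag |= 3; /* register is empty */
    }
    return tag;
}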