/* target-i386/helper2.c */
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU

#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

int modify_ldt(int func, void *ptr, unsigned long bytecount)
{
    return syscall(__NR_modify_ldt, func, ptr, bytecount);
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */

CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;

    env = qemu_mallocz(sizeof(CPUX86State));
    if (!env)
        return NULL;
    cpu_exec_init(env);

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
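        /* the selector loaded into %fs is (1 << 3) | 7 = 0x0f: index 1 (the
           LDT entry installed above), TI bit set to select the LDT, RPL 3 */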
    }
#endif
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 3;
        stepping = 3;
#endif
#endif
        env->cpuid_level = 2;
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                               CPUID_PAT);
        env->pat = 0x0007040600070406ULL;
        env->cpuid_ext_features = CPUID_EXT_SSE3;
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
        env->cpuid_features |= CPUID_APIC;
        env->cpuid_xlevel = 0x80000006;
        {
            const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
            int c, len, i;
            len = strlen(model_id);
            for(i = 0; i < 48; i++) {
                if (i >= len)
                    c = '\0';
                else
                    c = model_id[i];
                env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
            }
        }
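        /* cpuid_model[] holds the 48-byte brand string that CPUID returns
           four ASCII characters per 32-bit register via leaves
           0x80000002-0x80000004; the loop above packs it little-endian, so
           for "QEMU..." cpuid_model[0] = 'Q' | 'E' << 8 | 'M' << 16 |
           'U' << 24 = 0x554d4551.  The vendor words above use the same
           layout ("Genu" -> 0x756e6547). */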
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
        env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX;
        env->cpuid_xlevel = 0x80000008;

        /* these features are needed for Win64 and aren't fully implemented */
        env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
        /* this feature is needed for Solaris and isn't fully implemented */
        env->cpuid_features |= CPUID_PSE36;
#endif
    }
    cpu_reset(env);
#ifdef USE_KQEMU
    kqemu_init(env);
#endif
    return env;
}

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
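    /* with CS.base = 0xffff0000 and EIP = 0xfff0 (set below), the first
       instruction is fetched from the architectural reset vector at
       physical address 0xfffffff0 */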
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}

void cpu_x86_close(CPUX86State *env)
{
    free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i, nb;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016" PRIx64 " RBX=%016" PRIx64 " RCX=%016" PRIx64 " RDX=%016" PRIx64 "\n"
                    "RSI=%016" PRIx64 " RDI=%016" PRIx64 " RBP=%016" PRIx64 " RSP=%016" PRIx64 "\n"
                    "R8 =%016" PRIx64 " R9 =%016" PRIx64 " R10=%016" PRIx64 " R11=%016" PRIx64 "\n"
                    "R12=%016" PRIx64 " R13=%016" PRIx64 " R14=%016" PRIx64 " R15=%016" PRIx64 "\n"
                    "RIP=%016" PRIx64 " RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d SMM=%d HLT=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1,
                    (env->hflags >> HF_SMM_SHIFT) & 1,
                    (env->hflags >> HF_HALTED_SHIFT) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016" PRIx64 " %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016" PRIx64 " %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016" PRIx64 " %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016" PRIx64 " %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016" PRIx64 " %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016" PRIx64 " CR3=%016" PRIx64 " CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016" PRIx64 " CCD=%016" PRIx64 " CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        int fptag;
        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
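        /* env->fptags[i] is 1 when ST(i) is empty, so the abridged tag word
           printed below has a 1 bit for every valid (non-empty) register */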
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
        for(i=0;i<8;i++) {
#if defined(USE_X86LDOUBLE)
            union {
                long double d;
                struct {
                    uint64_t lower;
                    uint16_t upper;
                } l;
            } tmp;
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016" PRIx64 " %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016" PRIx64,
                        i, env->fpregs[i].mmx.q);
#endif
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i=0;i<nb;i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            if ((i & 1) == 1)
                cpu_fprintf(f, "\n");
            else
                cpu_fprintf(f, " ");
        }
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);
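        /* env->a20_mask is ANDed into guest physical addresses: with A20
           disabled, bit 20 is forced to zero, reproducing the 1 MB address
           wrap-around of the original PC */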

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
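    /* the update below relies on CR0.MP, CR0.EM and CR0.TS being bits 1..3
       and on HF_MP/HF_EM/HF_TS being consecutive hflags bits in the same
       order, so a single shift by (HF_MP_SHIFT - 1) copies all three */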
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

#define PHYS_ADDR_MASK 0xfffff000

/* return value:
   -1 = cannot handle fault
    0 = nothing more to do
    1 = generate PF fault
    2 = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write1, int is_user, int is_softmmu)
{
    uint64_t ptep, pte;
    uint32_t pdpe_addr, pde_addr, pte_addr;
    int error_code, is_dirty, prot, page_size, ret, is_write;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write1, is_user, env->eip);
#endif
    is_write = is_write1 & 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        page_size = 4096;
        goto do_mapping;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        uint64_t pde, pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr;
            uint64_t pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                env->error_code = 0;
                env->exception_index = EXCP0D_GPF;
                return 1;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldq_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pml4e & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }
            ptep = pml4e ^ PG_NX_MASK;
            pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pdpe & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            ptep &= pdpe ^ PG_NX_MASK;
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            /* XXX: load them when cr3 is loaded ? */
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldq_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            ptep = PG_NX_MASK | PG_USER_MASK | PG_RW_MASK;
        }

        pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (!(env->efer & MSR_EFER_NXE) && (pde & PG_NX_MASK)) {
            error_code = PG_ERROR_RSVD_MASK;
            goto do_fault;
        }
        ptep &= pde ^ PG_NX_MASK;
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* align to page_size */
            pte = pde & ((PHYS_ADDR_MASK & ~(page_size - 1)) | 0xfff);
            virt_addr = addr & ~(page_size - 1);
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            pte = ldq_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(env->efer & MSR_EFER_NXE) && (pte & PG_NX_MASK)) {
                error_code = PG_ERROR_RSVD_MASK;
                goto do_fault;
            }
            /* combine pde and pte nx, user and rw protections */
            ptep &= pte ^ PG_NX_MASK;
            ptep ^= PG_NX_MASK;
            if ((ptep & PG_NX_MASK) && is_write1 == 2)
                goto do_fault_protect;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
            pte = pte & (PHYS_ADDR_MASK | 0xfff);
        }
    } else {
        uint32_t pde;

        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }
    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (!(ptep & PG_NX_MASK))
        prot |= PAGE_EXEC;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page_exec(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->cr[2] = addr;
    error_code |= (is_write << PG_ERROR_W_BIT);
    if (is_user)
        error_code |= PG_ERROR_U_MASK;
    if (is_write1 == 2 &&
        (env->efer & MSR_EFER_NXE) &&
        (env->cr[4] & CR4_PAE_MASK))
        error_code |= PG_ERROR_I_D_MASK;
    env->error_code = error_code;
    env->exception_index = EXCP0E_PAGE;
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & 0xffc)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */

#if defined(USE_CODE_COPY)
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
    env->native_fp_regs = 1;
}

void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif