/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU

#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>
#include <linux/version.h>

_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */

CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    static int inited;

    cpu_exec_init();

    env = malloc(sizeof(CPUX86State));
    if (!env)
        return NULL;
    memset(env, 0, sizeof(CPUX86State));
    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

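        /* selector (1 << 3) | 7 = 0x0f: index 1, TI set (LDT), RPL 3 */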
        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
    {
        int family, model, stepping;
#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
        family = 6;
        model = 2;
        stepping = 3;
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 1;
        stepping = 3;
#endif
#endif
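        /* CPUID leaf 1 EAX layout: stepping in bits 3..0, model in
           bits 7..4, family in bits 11..8 (base fields only) */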
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV);
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2;
        env->cpuid_features |= CPUID_APIC | CPUID_PAE;
#endif
    }
    cpu_single_env = env;
    cpu_reset(env);
    return env;
}

/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    tlb_flush(env, 1);

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

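    /* CS.base 0xffff0000 + EIP 0xfff0 = 0xfffffff0, the x86 reset vector */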
    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    env->eflags = 0x2;

    /* FPU init */
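    /* fptags[i] = 1 marks the stack slot empty; 0x37f is the x87
       power-on control word (all exceptions masked, 64-bit precision,
       round to nearest) */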
    for(i = 0;i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;

    env->mxcsr = 0x1f80;
}

void cpu_x86_close(CPUX86State *env)
{
    free(env);
}

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",

    "MULB",
    "MULW",
    "MULL",
    "MULQ",

    "ADDB",
    "ADDW",
    "ADDL",
    "ADDQ",

    "ADCB",
    "ADCW",
    "ADCL",
    "ADCQ",

    "SUBB",
    "SUBW",
    "SUBL",
    "SUBQ",

    "SBBB",
    "SBBW",
    "SBBL",
    "SBBQ",

    "LOGICB",
    "LOGICW",
    "LOGICL",
    "LOGICQ",

    "INCB",
    "INCW",
    "INCL",
    "INCQ",

    "DECB",
    "DECW",
    "DECL",
    "DECQ",

    "SHLB",
    "SHLW",
    "SHLL",
    "SHLQ",

    "SARB",
    "SARW",
    "SARL",
    "SARQ",
};

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
#ifdef TARGET_X86_64
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016llx RBX=%016llx RCX=%016llx RDX=%016llx\n"
                    "RSI=%016llx RDI=%016llx RBP=%016llx RSP=%016llx\n"
                    "R8 =%016llx R9 =%016llx R10=%016llx R11=%016llx\n"
                    "R12=%016llx R13=%016llx R14=%016llx R15=%016llx\n"
                    "RIP=%016llx RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    } else
#endif
    {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    }

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016llx %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016llx %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016llx %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016llx %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016llx %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016llx CR3=%016llx CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else
#endif
    {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016llx CCD=%016llx CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else
#endif
        {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        cpu_fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n",
                    (double)env->fpregs[0].d,
                    (double)env->fpregs[1].d,
                    (double)env->fpregs[2].d,
                    (double)env->fpregs[3].d);
        cpu_fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n",
                    (double)env->fpregs[4].d,
                    (double)env->fpregs[5].d,
                    (double)env->fpregs[6].d,
                    (double)env->fpregs[7].d);
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TBs */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
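        /* bit 20 of a20_mask mirrors the gate: when clear, physical
           addresses wrap at 1 MB, as on a real PC with A20 disabled */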
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
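    /* a change to PG, WP or PE invalidates all cached translations */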
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

#ifdef TARGET_X86_64
    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }
#endif
    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
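    /* CR0.MP/EM/TS occupy adjacent bits 1-3, and the corresponding
       hflags are laid out contiguously, so one shift copies all three */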
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}

void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", new_cr4);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
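    /* CR4.OSFXSR is mirrored into hflags (and forced off when CPUID
       does not report SSE) so generated code can test it cheaply */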
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
{
    tlb_flush_page(env, addr);
}

#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
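    /* build a #PF error code: bit 0 = protection (P), bit 1 = write (W),
       bit 2 = user access (U) */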
    is_write &= 1;
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

/* return value:
   -1 = cannot handle fault
   0  = nothing more to do
   1  = generate PF fault
   2  = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    uint32_t pdpe_addr, pde_addr, pte_addr;
    uint32_t pde, pte, ptep, pdpe;
    int error_code, is_dirty, prot, page_size, ret;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write, is_user, env->eip);
#endif
    is_write &= 1;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE;
        page_size = 4096;
        goto do_mapping;
    }

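    /* PAE walk: PDPT -> PD -> PT with 8-byte entries (hence the "<< 3"
       indexing); long mode adds a PML4 level on top */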
    if (env->cr[4] & CR4_PAE_MASK) {
        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* XXX: handle user + rw rights */
            /* XXX: handle NX flag */
            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                error_code = 0;
                goto do_fault;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            goto handle_big_page;
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            goto handle_4k_page;
        }
    } else {
        /* page directory entry */
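        /* ((addr >> 20) & ~3) == ((addr >> 22) << 2): the top 10 address
           bits index 4-byte PDEs */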
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
        handle_big_page:
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
        handle_4k_page:
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }

        /* the page can be put in the TLB */
        prot = PAGE_READ;
        if (pte & PG_DIRTY_MASK) {
            /* only set write access if already dirty... otherwise wait
               for dirty access */
            if (is_user) {
                if (ptep & PG_RW_MASK)
                    prot |= PAGE_WRITE;
            } else {
                if (!(env->cr[0] & CR0_WP_MASK) ||
                    (ptep & PG_RW_MASK))
                    prot |= PAGE_WRITE;
            }
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
    if (is_user)
        env->error_code |= PG_ERROR_U_MASK;
    return 1;
}

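/* Debugger translation: walks the page tables without setting accessed
   or dirty bits, and returns -1 if the address is not mapped. */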
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else
#endif
        {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */

#if defined(USE_CODE_COPY)
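/* memory image used by FSAVE/FRSTOR in 32-bit protected mode: seven
   32-bit control/status fields followed by eight 80-bit registers */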
struct fpstate {
    uint16_t fpuc;
    uint16_t dummy1;
    uint16_t fpus;
    uint16_t dummy2;
    uint16_t fptag;
    uint16_t dummy3;

    uint32_t fpip;
    uint32_t fpcs;
    uint32_t fpoo;
    uint32_t fpos;
    uint8_t fpregs1[8 * 10];
};

void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
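    /* bits 13..11 of the x87 status word hold the top-of-stack pointer */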
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    /* frstor reads the image from memory, so it is an input operand */
    asm volatile ("frstor %0" : : "m" (*fp));
    env->native_fp_regs = 1;
}

void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    /* fsave writes the image to memory, so it is an output operand */
    asm volatile ("fsave %0" : "=m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0;i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
#endif