target-i386/helper2.c (git.proxmox.com mirror of qemu.git)
/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#ifdef USE_CODE_COPY
#include <linux/unistd.h>
#include <linux/version.h>

_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;

    env = malloc(sizeof(CPUX86State));
    if (!env)
        return NULL;
    memset(env, 0, sizeof(CPUX86State));

    /* init various static tables */
    optimize_flags_init();

#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
#endif
    {
        int family, model, stepping;

#ifdef TARGET_X86_64
        env->cpuid_vendor1 = 0x68747541; /* "Auth" */
        env->cpuid_vendor2 = 0x69746e65; /* "enti" */
        env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
#else
        env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
        env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
        env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#endif
        /* ... family, model and stepping are assigned here ... */
        env->cpuid_version = (family << 8) | (model << 4) | stepping;
        env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                               CPUID_TSC | CPUID_MSR | CPUID_MCE |
                               CPUID_CX8 | CPUID_PGE | CPUID_CMOV);
#ifdef TARGET_X86_64
        /* currently not enabled for std i386 because not fully tested */
        env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2;
        env->cpuid_features |= CPUID_APIC | CPUID_PAE;
#endif
    }
    cpu_single_env = env;
    return env;
}
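/*
 * Illustrative sketch (not part of the original file): the encodings used in
 * cpu_x86_init() above.  cpuid_version packs family/model/stepping into the
 * layout returned by CPUID leaf 1 in EAX, and each cpuid_vendor dword holds
 * four ASCII characters, least-significant byte first, so 0x756e6547,
 * 0x49656e69, 0x6c65746e spell "GenuineIntel".  demo_cpuid_version() is a
 * hypothetical helper that only shows the bit packing; e.g. family 6,
 * model 1, stepping 3 gives 0x613.
 */
static inline uint32_t demo_cpuid_version(int family, int model, int stepping)
{
    /* bits 8..11: family, bits 4..7: model, bits 0..3: stepping */
    return (family << 8) | (model << 4) | stepping;
}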
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);

    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    for (i = 0; i < 8; i++)
        env->fptags[i] = 1;
}
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}

/***********************************************************/

static const char *cc_op_str[] = {
    /* ... one string per CC_OP_* value ... */
};
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016llx RBX=%016llx RCX=%016llx RDX=%016llx\n"
                    "RSI=%016llx RDI=%016llx RBP=%016llx RSP=%016llx\n"
                    "R8 =%016llx R9 =%016llx R10=%016llx R11=%016llx\n"
                    "R12=%016llx R13=%016llx R14=%016llx R15=%016llx\n"
                    "RIP=%016llx RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    } else {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    }
    if (env->hflags & HF_LMA_MASK) {
        for (i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016llx %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016llx %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016llx %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %016llx %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %016llx %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016llx CR3=%016llx CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else {
        for (i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT=     %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT=     %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016llx CCD=%016llx CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
    if (flags & X86_DUMP_FPU) {
        cpu_fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n",
                    (double)env->fpregs[0].d,
                    (double)env->fpregs[1].d,
                    (double)env->fpregs[2].d,
                    (double)env->fpregs[3].d);
        cpu_fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n",
                    (double)env->fpregs[4].d,
                    (double)env->fpregs[5].d,
                    (double)env->fpregs[6].d,
                    (double)env->fpregs[7].d);
    }
}
/***********************************************************/
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}
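/*
 * Illustrative sketch (not part of the original file): what the a20_mask set
 * above does.  Physical addresses are ANDed with a20_mask during translation,
 * so with A20 disabled address bit 20 is forced to zero (the 8086-style 1MB
 * wrap-around) and with A20 enabled addresses pass through unchanged.
 * demo_apply_a20() is a hypothetical helper that only shows the masking.
 */
static inline uint32_t demo_apply_a20(uint32_t paddr, int a20_state)
{
    uint32_t a20_mask = 0xffefffff | ((a20_state != 0) << 20);
    /* e.g. paddr = 0x00100000 becomes 0x00000000 when A20 is disabled */
    return paddr & a20_mask;
}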
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        /* exit long mode */
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }

    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
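/*
 * Illustrative sketch (not part of the original file): why the single shift in
 * cpu_x86_update_cr0() works.  CR0.MP, CR0.EM and CR0.TS are architecturally
 * bits 1, 2 and 3 of CR0, and HF_MP_MASK/HF_EM_MASK/HF_TS_MASK are three
 * consecutive hflags bits starting at HF_MP_SHIFT, so shifting CR0 left by
 * (HF_MP_SHIFT - 1) lines all three flags up at once.  The helper below is
 * hypothetical and only demonstrates that bit movement.
 */
static inline uint32_t demo_cr0_to_fpu_hflags(uint32_t cr0, uint32_t hflags)
{
    /* clear the three FPU-related hflags, then copy them from CR0 */
    hflags &= ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (cr0 << (HF_MP_SHIFT - 1)) &
        (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    return hflags;
}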
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    /* SSE handling */
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
{
    tlb_flush_page(env, addr);
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

#else

/* return value:
   -1 = cannot handle fault
    0 = nothing more to do
    1 = generate PF fault
    2 = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    uint32_t pdpe_addr, pde_addr, pte_addr;
    uint32_t pde, pte, ptep, pdpe;
    int error_code, is_dirty, prot, page_size, ret;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write, is_user, env->eip);
#endif

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE;
        page_size = 4096;
        goto do_mapping;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        /* XXX: we only use 32 bit physical addresses */
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* XXX: handle user + rw rights */
            /* XXX: handle NX flag */
            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                error_code = 0;
                goto do_fault;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
        }
        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            goto handle_big_page;
        } else {
            /* 4 KB page */
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            goto handle_4k_page;
        }
    } else {
        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
        handle_big_page:
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }
            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
        handle_4k_page:
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
        /* the page can be put in the TLB */
        prot = PAGE_READ;
        if (pte & PG_DIRTY_MASK) {
            /* only set write access if already dirty... otherwise wait
               for dirty access */
            if (is_user) {
                if (ptep & PG_RW_MASK)
                    prot |= PAGE_WRITE;
            } else {
                if (!(env->cr[0] & CR0_WP_MASK) ||
                    (ptep & PG_RW_MASK))
                    prot |= PAGE_WRITE;
            }
        }
    }

 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;

 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
    if (is_user)
        env->error_code |= PG_ERROR_U_MASK;
    return 1;
}
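/*
 * Illustrative sketch (not part of the original file): how the legacy
 * two-level walk above derives its table offsets.  "(addr >> 20) & ~3" is the
 * page-directory index (addr >> 22) scaled by the 4-byte entry size, and
 * "(addr >> 10) & 0xffc" is the page-table index ((addr >> 12) & 0x3ff)
 * scaled the same way.  demo_split_linear_addr() is a hypothetical helper
 * that only demonstrates that equivalence.
 */
static inline void demo_split_linear_addr(uint32_t addr, uint32_t *pde_off,
                                          uint32_t *pte_off, uint32_t *page_off)
{
    *pde_off = ((addr >> 22) & 0x3ff) << 2;  /* same value as (addr >> 20) & ~3   */
    *pte_off = ((addr >> 12) & 0x3ff) << 2;  /* same value as (addr >> 10) & 0xffc */
    *page_off = addr & 0xfff;                /* byte offset within the 4KB page    */
}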
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int32_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            page_size = 2048 * 1024;
            pte = pde & ~((page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            /* 4 KB page */
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif /* !CONFIG_USER_ONLY */
#if defined(USE_CODE_COPY)

struct fpstate {
    /* ... x87 environment header (control, status and tag words, FPU
       instruction and operand pointers) ... */
    uint8_t fpregs1[8 * 10];
};

void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
    env->native_fp_regs = 1;
}
void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for (i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
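/*
 * Illustrative sketch (not part of the original file): the fpus/fpstt
 * conversion used by the two functions above.  Bits 11..13 of the x87 status
 * word hold TOP, the index of the current stack top, which this code keeps
 * separately in env->fpstt.  The hypothetical helpers below only demonstrate
 * the packing and unpacking.
 */
static inline uint16_t demo_pack_fpus(uint16_t fpus, unsigned fpstt)
{
    return (fpus & ~0x3800) | ((fpstt & 0x7) << 11);  /* insert TOP field  */
}

static inline unsigned demo_unpack_fpstt(uint16_t fpus)
{
    return (fpus >> 11) & 7;                          /* extract TOP field */
}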