/*
 *  i386 helpers (without register variable usage)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/unistd.h>
#include <linux/version.h>

_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 66)
#define modify_ldt_ldt_s user_desc
#endif
#endif /* USE_CODE_COPY */
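/* Kernels from 2.5.66 on renamed the LDT descriptor structure from
   modify_ldt_ldt_s to user_desc; the #define above keeps the older name,
   used further down, compiling against newer kernel headers. */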
CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;

    env = malloc(sizeof(CPUX86State));
    memset(env, 0, sizeof(CPUX86State));

    /* init various static tables */
    optimize_flags_init();
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
    }
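    /* The selector written to %fs above is (1 << 3) | 7: descriptor index 1
       with the table-indicator bit set (so it refers to the LDT set up via
       modify_ldt()) at RPL 3, letting generated code reach CPUX86State
       through %fs-relative addressing. */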
    int family, model, stepping;

#ifdef TARGET_X86_64
    env->cpuid_vendor1 = 0x68747541; /* "Auth" */
    env->cpuid_vendor2 = 0x69746e65; /* "enti" */
    env->cpuid_vendor3 = 0x444d4163; /* "cAMD" */
#else
    env->cpuid_vendor1 = 0x756e6547; /* "Genu" */
    env->cpuid_vendor2 = 0x49656e69; /* "ineI" */
    env->cpuid_vendor3 = 0x6c65746e; /* "ntel" */
#endif

    env->cpuid_level = 2;
    env->cpuid_version = (family << 8) | (model << 4) | stepping;
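    /* CPUID leaf 0 returns the vendor string in EBX:EDX:ECX as packed ASCII,
       so "GenuineIntel" becomes 0x756e6547 ("Genu"), 0x49656e69 ("ineI") and
       0x6c65746e ("ntel") when read as little-endian words.  Leaf 1's EAX is
       built the same way the hardware reports it: stepping in bits 3:0,
       model in bits 7:4 and family in bits 11:8. */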
    env->cpuid_features = (CPUID_FP87 | CPUID_DE | CPUID_PSE |
                           CPUID_TSC | CPUID_MSR | CPUID_MCE |
                           CPUID_CX8 | CPUID_PGE | CPUID_CMOV |
                           CPUID_PAT);
    env->pat = 0x0007040600070406ULL;
    env->cpuid_ext_features = 0;
    env->cpuid_features |= CPUID_FXSR | CPUID_MMX | CPUID_SSE |
                           CPUID_SSE2 | CPUID_PAE | CPUID_SEP;
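    /* 0x0007040600070406 is the architectural power-on value of the PAT MSR:
       each byte is one PAT entry, giving WB, WT, UC- and UC for entries 0-3
       and repeating the same pattern for entries 4-7. */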
    env->cpuid_xlevel = 0;
    {
        const char *model_id = "QEMU Virtual CPU version " QEMU_VERSION;
        int c, len, i;

        len = strlen(model_id);
        for(i = 0; i < 48; i++) {
            if (i >= len)
                c = '\0';
            else
                c = model_id[i];
            env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
        }
    }
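    /* cpuid_model[] holds the 48-character processor brand string returned by
       CPUID leaves 0x80000002-0x80000004, four characters packed little-endian
       into each 32-bit word; positions beyond strlen(model_id) stay NUL. */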
#ifdef TARGET_X86_64
    /* currently not enabled for std i386 because not fully tested */
    env->cpuid_features |= CPUID_APIC;
    env->cpuid_ext2_features = (env->cpuid_features & 0x0183F3FF);
    env->cpuid_ext2_features |= CPUID_EXT2_LM | CPUID_EXT2_SYSCALL;
    env->cpuid_xlevel = 0x80000008;

    /* these features are needed for Win64 and aren't fully implemented */
    env->cpuid_features |= CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA;
#endif
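    /* cpuid_ext2_features corresponds to CPUID leaf 0x80000001 EDX.  On AMD
       parts that leaf mirrors most of the leaf-1 EDX bits, which is what the
       0x0183F3FF mask copies over before the 64-bit-only LM and SYSCALL bits
       are added. */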
    cpu_single_env = env;
    return env;
}
/* NOTE: must be called outside the CPU execute loop */
void cpu_reset(CPUX86State *env)
{
    int i;

    memset(env, 0, offsetof(CPUX86State, breakpoints));
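    /* Only the fields up to 'breakpoints' are cleared: CPUX86State places the
       guest-visible CPU state first, so the emulator-internal fields that
       follow (breakpoints and other host-side bookkeeping) survive a reset. */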
    /* init to reset state */
#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;
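    /* 0x60000010 is the architectural CR0 value after RESET: CD and NW set,
       ET set, PE and PG clear, i.e. real mode with caches disabled. */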
    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff, 0);
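    /* CS gets selector 0xf000 but a base of 0xffff0000, so together with the
       architectural reset EIP of 0xfff0 the first fetch comes from the reset
       vector at physical address 0xfffffff0. */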
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */

    for(i = 0; i < 8; i++)
        env->fptags[i] = 1;
void cpu_x86_close(CPUX86State *env)
{
    free(env);
}

/***********************************************************/

static const char *cc_op_str[] = {
void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags)
{
    int eflags, i;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
    if (env->hflags & HF_CS64_MASK) {
        cpu_fprintf(f,
                    "RAX=%016llx RBX=%016llx RCX=%016llx RDX=%016llx\n"
                    "RSI=%016llx RDI=%016llx RBP=%016llx RSP=%016llx\n"
                    "R8 =%016llx R9 =%016llx R10=%016llx R11=%016llx\n"
                    "R12=%016llx R13=%016llx R14=%016llx R15=%016llx\n"
                    "RIP=%016llx RFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
                    env->regs[R_EAX],
                    env->regs[R_EBX],
                    env->regs[R_ECX],
                    env->regs[R_EDX],
                    env->regs[R_ESI],
                    env->regs[R_EDI],
                    env->regs[R_EBP],
                    env->regs[R_ESP],
                    env->regs[8],
                    env->regs[9],
                    env->regs[10],
                    env->regs[11],
                    env->regs[12],
                    env->regs[13],
                    env->regs[14],
                    env->regs[15],
                    env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
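        /* The bracketed field decodes DF plus the six arithmetic flags, II is
           the one-instruction interrupt-inhibit state (after STI or MOV SS),
           and A20 shows bit 20 of the current address mask. */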
    } else {
        cpu_fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
                    "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
                    "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d A20=%d\n",
                    (uint32_t)env->regs[R_EAX],
                    (uint32_t)env->regs[R_EBX],
                    (uint32_t)env->regs[R_ECX],
                    (uint32_t)env->regs[R_EDX],
                    (uint32_t)env->regs[R_ESI],
                    (uint32_t)env->regs[R_EDI],
                    (uint32_t)env->regs[R_EBP],
                    (uint32_t)env->regs[R_ESP],
                    (uint32_t)env->eip, eflags,
                    eflags & DF_MASK ? 'D' : '-',
                    eflags & CC_O ? 'O' : '-',
                    eflags & CC_S ? 'S' : '-',
                    eflags & CC_Z ? 'Z' : '-',
                    eflags & CC_A ? 'A' : '-',
                    eflags & CC_P ? 'P' : '-',
                    eflags & CC_C ? 'C' : '-',
                    env->hflags & HF_CPL_MASK,
                    (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1,
                    (env->a20_mask >> 20) & 1);
    }
    if (env->hflags & HF_LMA_MASK) {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %016llx %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %016llx %08x %08x\n",
                    env->ldt.selector,
                    env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %016llx %08x %08x\n",
                    env->tr.selector,
                    env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT= %016llx %08x\n",
                    env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %016llx %08x\n",
                    env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%016llx CR3=%016llx CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    env->cr[2],
                    env->cr[3],
                    (uint32_t)env->cr[4]);
    } else {
        for(i = 0; i < 6; i++) {
            SegmentCache *sc = &env->segs[i];
            cpu_fprintf(f, "%s =%04x %08x %08x %08x\n",
                        seg_name[i],
                        sc->selector,
                        (uint32_t)sc->base,
                        sc->limit,
                        sc->flags);
        }
        cpu_fprintf(f, "LDT=%04x %08x %08x %08x\n",
                    env->ldt.selector,
                    (uint32_t)env->ldt.base,
                    env->ldt.limit,
                    env->ldt.flags);
        cpu_fprintf(f, "TR =%04x %08x %08x %08x\n",
                    env->tr.selector,
                    (uint32_t)env->tr.base,
                    env->tr.limit,
                    env->tr.flags);
        cpu_fprintf(f, "GDT= %08x %08x\n",
                    (uint32_t)env->gdt.base, env->gdt.limit);
        cpu_fprintf(f, "IDT= %08x %08x\n",
                    (uint32_t)env->idt.base, env->idt.limit);
        cpu_fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
                    (uint32_t)env->cr[0],
                    (uint32_t)env->cr[2],
                    (uint32_t)env->cr[3],
                    (uint32_t)env->cr[4]);
    }
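    /* Each segment line dumps the cached (hidden) descriptor state as the CPU
       currently uses it: selector, base, limit and raw attribute flags, rather
       than re-reading the descriptor tables. */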
    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            snprintf(cc_op_name, sizeof(cc_op_name), "%s", cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
        if (env->hflags & HF_CS64_MASK) {
            cpu_fprintf(f, "CCS=%016llx CCD=%016llx CCO=%-8s\n",
                        env->cc_src, env->cc_dst,
                        cc_op_name);
        } else {
            cpu_fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                        (uint32_t)env->cc_src, (uint32_t)env->cc_dst,
                        cc_op_name);
        }
    }
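    /* QEMU computes EFLAGS lazily: cc_src and cc_dst hold the operands/result
       of the last flag-setting operation and cc_op records which operation it
       was, so the arithmetic flags are only materialised when needed. */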
    if (flags & X86_DUMP_FPU) {
        int fptag, nb;

        fptag = 0;
        for(i = 0; i < 8; i++) {
            fptag |= ((!env->fptags[i]) << i);
        }
        cpu_fprintf(f, "FCW=%04x FSW=%04x [ST=%d] FTW=%02x MXCSR=%08x\n",
                    env->fpuc,
                    (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11,
                    env->fpstt,
                    fptag,
                    env->mxcsr);
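        /* env->fpus is stored with the TOP field zeroed and the stack top kept
           in fpstt, so TOP is spliced back into FSW bits 13:11 for display.
           fptags[] uses 1 to mean "empty", hence the inversion when building
           the FXSAVE-style abridged tag byte. */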
#if defined(USE_X86LDOUBLE)
            tmp.d = env->fpregs[i].d;
            cpu_fprintf(f, "FPR%d=%016llx %04x",
                        i, tmp.l.lower, tmp.l.upper);
#else
            cpu_fprintf(f, "FPR%d=%016llx",
                        i, env->fpregs[i].mmx.q);
#endif
            cpu_fprintf(f, "\n");
        if (env->hflags & HF_CS64_MASK)
            nb = 16;
        else
            nb = 8;
        for(i = 0; i < nb; i++) {
            cpu_fprintf(f, "XMM%02d=%08x%08x%08x%08x",
                        i,
                        env->xmm_regs[i].XMM_L(3),
                        env->xmm_regs[i].XMM_L(2),
                        env->xmm_regs[i].XMM_L(1),
                        env->xmm_regs[i].XMM_L(0));
            cpu_fprintf(f, "\n");
        }
    }
/***********************************************************/
/* XXX: add PGE support */
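/* The A20 gate is modelled purely as an address mask: bit 20 of a20_mask
   tracks the gate, so with A20 disabled physical addresses are ANDed with
   ~(1 << 20) and the legacy 1MB wrap-around is reproduced. */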
void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = 0xffefffff | (a20_state << 20);
    }
}
void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }

    if (!(env->cr[0] & CR0_PG_MASK) && (new_cr0 & CR0_PG_MASK) &&
        (env->efer & MSR_EFER_LME)) {
        /* enter in long mode */
        /* XXX: generate an exception */
        if (!(env->cr[4] & CR4_PAE_MASK))
            return;
        env->efer |= MSR_EFER_LMA;
        env->hflags |= HF_LMA_MASK;
    } else if ((env->cr[0] & CR0_PG_MASK) && !(new_cr0 & CR0_PG_MASK) &&
               (env->efer & MSR_EFER_LMA)) {
        env->efer &= ~MSR_EFER_LMA;
        env->hflags &= ~(HF_LMA_MASK | HF_CS64_MASK);
        env->eip &= 0xffffffff;
    }

    env->cr[0] = new_cr0 | CR0_ET_MASK;

    /* update PE flag in hidden flags */
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
    /* update FPU flags */
    env->hflags = (env->hflags & ~(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK)) |
        ((new_cr0 << (HF_MP_SHIFT - 1)) & (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK));
}
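/* A CR0 write keeps several pieces of derived state coherent: turning paging
   on while EFER.LME and CR4.PAE are set activates long mode (EFER.LMA and
   HF_LMA_MASK), and turning paging off leaves it again.  The final statement
   copies CR0.MP/EM/TS into the matching hflags bits, since shifting CR0 left
   by HF_MP_SHIFT - 1 lines bits 1-3 up with HF_MP/HF_EM/HF_TS. */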
/* XXX: in legacy PAE mode, generate a GPF if reserved bits are set in
   the PDPT */
void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=" TARGET_FMT_lx "\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
    printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    if (!(env->cpuid_features & CPUID_SSE))
        new_cr4 &= ~CR4_OSFXSR_MASK;
    if (new_cr4 & CR4_OSFXSR_MASK)
        env->hflags |= HF_OSFXSR_MASK;
    else
        env->hflags &= ~HF_OSFXSR_MASK;

    env->cr[4] = new_cr4;
}
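/* Toggling PGE, PAE or PSE changes how page-table entries are interpreted, so
   any cached translations are stale and the whole TLB is flushed.  The
   HF_OSFXSR_MASK hflag mirrors CR4.OSFXSR so that translated code can test
   cheaply whether SSE instructions are permitted. */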
/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, target_ulong addr)
{
    tlb_flush_page(env, addr);
}
#if defined(CONFIG_USER_ONLY)

int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    /* user mode only emulation */
    env->error_code = (is_write << PG_ERROR_W_BIT);
    env->error_code |= PG_ERROR_U_MASK;
    return 1;
}

target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
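/* In user-mode emulation there is no guest page table to walk: guest virtual
   addresses are used directly, so a fault only records a page-fault error
   code (write bit plus user bit) for the caller to deliver as #PF, and the
   "physical" address of a page is the address itself. */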
#else

/* return value:
   -1 = cannot handle fault
    0 = nothing more to do
    1 = generate PF fault
    2 = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
                             int is_write, int is_user, int is_softmmu)
{
    uint32_t pdpe_addr, pde_addr, pte_addr;
    uint32_t pde, pte, ptep, pdpe;
    int error_code, is_dirty, prot, page_size, ret;
    unsigned long paddr, page_offset;
    target_ulong vaddr, virt_addr;
#if defined(DEBUG_MMU)
    printf("MMU fault: addr=" TARGET_FMT_lx " w=%d u=%d eip=" TARGET_FMT_lx "\n",
           addr, is_write, is_user, env->eip);
#endif
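    /* The walk below follows the hardware: with paging disabled the address
       maps 1:1; with CR4.PAE set the 64-bit entry formats are used (with an
       extra PML4 level once long mode is active); otherwise the classic
       two-level 32-bit tables apply, including 4MB pages when CR4.PSE is on. */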
    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PAGE_READ | PAGE_WRITE;
        page_size = 4096;
        goto do_mapping;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        /* XXX: we only use 32 bit physical addresses */
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int64_t sext;

            /* XXX: handle user + rw rights */
            /* XXX: handle NX flag */
            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1) {
                error_code = 0;
                goto do_fault;
            }

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pml4e & PG_ACCESSED_MASK)) {
                pml4e |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pml4e_addr, pml4e);
            }

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            if (!(pdpe & PG_ACCESSED_MASK)) {
                pdpe |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pdpe_addr, pdpe);
            }
        } else {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        if (pde & PG_PSE_MASK) {
            page_size = 2048 * 1024;
            goto handle_big_page;
        }
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            stl_phys_notdirty(pde_addr, pde);
        }
        pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
            env->a20_mask;
        goto handle_4k_page;
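        /* In long mode bits 47-39, 38-30, 29-21 and 20-12 of the address index
           the PML4, PDPT, page directory and page table (512 eight-byte
           entries per table, hence the << 3); outside long mode the PAE PDPT
           has just four entries selected by bits 31-30. */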
    } else {
        /* page directory entry */
        pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* if PSE bit is set, then we use a 4MB page */
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
        handle_big_page:
            if (is_user) {
                if (!(pde & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(pde & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pde & PG_DIRTY_MASK);
            if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
                pde |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pde |= PG_DIRTY_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
            ptep = pte;
            virt_addr = addr & ~(page_size - 1);
        } else {
            if (!(pde & PG_ACCESSED_MASK)) {
                pde |= PG_ACCESSED_MASK;
                stl_phys_notdirty(pde_addr, pde);
            }

            /* page table entry */
            pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) &
                env->a20_mask;
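            /* The legacy format uses 1024 four-byte entries per table:
               ((addr >> 20) & ~3) is the top ten address bits scaled by four
               to index the page directory, and ((addr >> 10) & 0xffc) does
               the same with bits 21-12 for the page table. */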
        handle_4k_page:
            pte = ldl_phys(pte_addr);
            if (!(pte & PG_PRESENT_MASK)) {
                error_code = 0;
                goto do_fault;
            }
            /* combine pde and pte user and rw protections */
            ptep = pte & pde;
            if (is_user) {
                if (!(ptep & PG_USER_MASK))
                    goto do_fault_protect;
                if (is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            } else {
                if ((env->cr[0] & CR0_WP_MASK) &&
                    is_write && !(ptep & PG_RW_MASK))
                    goto do_fault_protect;
            }
            is_dirty = is_write && !(pte & PG_DIRTY_MASK);
            if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
                pte |= PG_ACCESSED_MASK;
                if (is_dirty)
                    pte |= PG_DIRTY_MASK;
                stl_phys_notdirty(pte_addr, pte);
            }
            page_size = 4096;
            virt_addr = addr & ~0xfff;
        }
    }

    /* the page can be put in the TLB */
    prot = PAGE_READ;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PAGE_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PAGE_WRITE;
        }
    }
 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;

 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
    if (is_user)
        env->error_code |= PG_ERROR_U_MASK;
    return 1;
}
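/* Deferring PAGE_WRITE until the dirty bit is already set is intentional: a
   clean page is entered into the TLB read-only, so the first guest write
   still faults, the handler then sets the D bit in the guest page table, and
   only after that is the mapping installed as writable. */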
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t pde_addr, pte_addr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (env->cr[4] & CR4_PAE_MASK) {
        uint32_t pdpe_addr, pde_addr, pte_addr;
        uint32_t pdpe;

        /* XXX: we only use 32 bit physical addresses */
        if (env->hflags & HF_LMA_MASK) {
            uint32_t pml4e_addr, pml4e;
            int64_t sext;

            /* test virtual address sign extension */
            sext = (int64_t)addr >> 47;
            if (sext != 0 && sext != -1)
                return -1;

            pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) &
                env->a20_mask;
            pml4e = ldl_phys(pml4e_addr);
            if (!(pml4e & PG_PRESENT_MASK))
                return -1;

            pdpe_addr = ((pml4e & ~0xfff) + (((addr >> 30) & 0x1ff) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        } else {
            pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 30) << 3)) &
                env->a20_mask;
            pdpe = ldl_phys(pdpe_addr);
            if (!(pdpe & PG_PRESENT_MASK))
                return -1;
        }

        pde_addr = ((pdpe & ~0xfff) + (((addr >> 21) & 0x1ff) << 3)) &
            env->a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            return -1;
        }
        if (pde & PG_PSE_MASK) {
            page_size = 2048 * 1024;
            pte = pde & ~( (page_size - 1) & ~0xfff); /* align to page_size */
        } else {
            pte_addr = ((pde & ~0xfff) + (((addr >> 12) & 0x1ff) << 3)) &
                env->a20_mask;
            page_size = 4096;
            pte = ldl_phys(pte_addr);
        }
    } else {
        if (!(env->cr[0] & CR0_PG_MASK)) {
            pte = addr;
            page_size = 4096;
        } else {
            /* page directory entry */
            pde_addr = ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask;
            pde = ldl_phys(pde_addr);
            if (!(pde & PG_PRESENT_MASK))
                return -1;
            if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
                pte = pde & ~0x003ff000; /* align to 4MB */
                page_size = 4096 * 1024;
            } else {
                /* page table entry */
                pte_addr = ((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask;
                pte = ldl_phys(pte_addr);
                if (!(pte & PG_PRESENT_MASK))
                    return -1;
                page_size = 4096;
            }
        }
        pte = pte & env->a20_mask;
    }

    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
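/* cpu_get_phys_page_debug() is the read-only twin of the fault handler: it
   repeats the same walk for debug accesses (e.g. the monitor or gdb stub) but
   never touches accessed/dirty bits, ignores protection, and returns -1 when
   the address is not mapped. */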
#endif /* !CONFIG_USER_ONLY */

#if defined(USE_CODE_COPY)
    uint8_t fpregs1[8 * 10];
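/* fpregs1[] is the tail of the FSAVE/FRSTOR memory image used below: the
   control, status and tag words (and instruction/operand pointers) come
   first, followed by the eight 80-bit stack registers at 10 bytes each. */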
void restore_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    struct fpstate fp1, *fp = &fp1;

    fp->fpuc = env->fpuc;
    fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            /* the FPU automatically computes it */
        }
    }
    fp->fptag = fptag;
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&fp->fpregs1[i * 10], &env->fpregs[j].d, 10);
        j = (j + 1) & 7;
    }
    asm volatile ("frstor %0" : "=m" (*fp));
    env->native_fp_regs = 1;
}
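/* env->fpregs[] is indexed by physical register number with fpstt naming the
   top of stack, while the FSAVE image expects ST(0)..ST(7) in stack order;
   starting the copy at j = fpstt and wrapping modulo 8 performs that
   rotation.  In the full tag word two bits describe each register, 11b
   meaning empty. */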
void save_native_fp_state(CPUState *env)
{
    int fptag, i, j;
    uint16_t fpuc;
    struct fpstate fp1, *fp = &fp1;

    asm volatile ("fsave %0" : : "m" (*fp));
    env->fpuc = fp->fpuc;
    env->fpstt = (fp->fpus >> 11) & 7;
    env->fpus = fp->fpus & ~0x3800;
    fptag = fp->fptag;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
    j = env->fpstt;
    for(i = 0; i < 8; i++) {
        memcpy(&env->fpregs[j].d, &fp->fpregs1[i * 10], 10);
        j = (j + 1) & 7;
    }
    /* we must restore the default rounding state */
    /* XXX: we do not restore the exception state */
    fpuc = 0x037f | (env->fpuc & (3 << 10));
    asm volatile("fldcw %0" : : "m" (fpuc));
    env->native_fp_regs = 0;
}
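/* 0x037f is the x87 power-on control word (all exceptions masked, 64-bit
   precision); OR-ing in bits 11:10 of the guest's control word keeps the
   guest-selected rounding mode in effect while the host runs translated
   code. */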