[mirror_qemu.git] / target-i386 / helper2.c
commit: experimental code copy support - fixed A20 emulation
/*
 * i386 helpers (without register variable usage)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>
#include <sys/mman.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_MMU

#ifdef USE_CODE_COPY
#include <asm/ldt.h>
#include <linux/unistd.h>

_syscall3(int, modify_ldt, int, func, void *, ptr, unsigned long, bytecount)
#endif

CPUX86State *cpu_x86_init(void)
{
    CPUX86State *env;
    int i;
    static int inited;

    cpu_exec_init();

    env = malloc(sizeof(CPUX86State));
    if (!env)
        return NULL;
    memset(env, 0, sizeof(CPUX86State));

    /* init to reset state */

    tlb_flush(env, 1);
#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif

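    /* CR0 reset value per the Intel documentation: CD and NW set, ET set,
       paging and protection disabled.  An all-ones a20_mask means the A20
       line starts out enabled (no address wrap-around at 1MB). */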
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = 0xffffffff;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK;
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK;

    /* not correct (CS base=0xffff0000) */
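    /* On real hardware the reset CS base is 0xffff0000 with EIP=0xfff0, so
       the first fetch is at 0xfffffff0.  Using a 0x000f0000 base instead
       presumably relies on the BIOS image also being visible just below
       1MB rather than only at the top of the 4GB address space. */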
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, (uint8_t *)0x000f0000, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, NULL, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0xffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0xffff, 0);

    env->eip = 0xfff0;
    env->regs[R_EDX] = 0x600; /* indicate P6 processor */
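    /* After reset, (E)DX holds the component identifier: family in bits
       11-8, so 0x600 advertises a family-6 (P6 class) CPU with model and
       stepping 0. */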

    env->eflags = 0x2;

    /* FPU init */
    for(i = 0; i < 8; i++)
        env->fptags[i] = 1;
    env->fpuc = 0x37f;
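    /* These match FNINIT: all tag fields marked "empty" and a control word
       of 0x37f (all exceptions masked, 64-bit precision, round to nearest). */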

    /* init various static tables */
    if (!inited) {
        inited = 1;
        optimize_flags_init();
    }
#ifdef USE_CODE_COPY
    /* testing code for code copy case */
    {
        struct modify_ldt_ldt_s ldt;

        ldt.entry_number = 1;
        ldt.base_addr = (unsigned long)env;
        ldt.limit = (sizeof(CPUState) + 0xfff) >> 12;
        ldt.seg_32bit = 1;
        ldt.contents = MODIFY_LDT_CONTENTS_DATA;
        ldt.read_exec_only = 0;
        ldt.limit_in_pages = 1;
        ldt.seg_not_present = 0;
        ldt.useable = 1;
        modify_ldt(1, &ldt, sizeof(ldt)); /* write ldt entry */

        asm volatile ("movl %0, %%fs" : : "r" ((1 << 3) | 7));
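        /* Selector (1 << 3) | 7 = index 1, TI=1 (LDT), RPL=3: the data
           segment installed just above, covering this CPUX86State.  Loading
           it into %fs is meant to let code-copy blocks reach the CPU state
           through a segment override instead of a host register. */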
        cpu_single_env = env;
    }
#endif
    return env;
}

void cpu_x86_close(CPUX86State *env)
{
    free(env);
}
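
/* Minimal usage sketch (illustrative only, error handling omitted):
 *
 *     CPUX86State *env = cpu_x86_init();
 *     if (env) {
 *         cpu_x86_dump_state(env, stderr, X86_DUMP_CCOP | X86_DUMP_FPU);
 *         cpu_x86_close(env);
 *     }
 *
 * Real callers additionally set up guest memory and drive the CPU through
 * the main execution loop.
 */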

/***********************************************************/
/* x86 debug */

static const char *cc_op_str[] = {
    "DYNAMIC",
    "EFLAGS",
    "MULB",
    "MULW",
    "MULL",
    "ADDB",
    "ADDW",
    "ADDL",
    "ADCB",
    "ADCW",
    "ADCL",
    "SUBB",
    "SUBW",
    "SUBL",
    "SBBB",
    "SBBW",
    "SBBL",
    "LOGICB",
    "LOGICW",
    "LOGICL",
    "INCB",
    "INCW",
    "INCL",
    "DECB",
    "DECW",
    "DECL",
    "SHLB",
    "SHLW",
    "SHLL",
    "SARB",
    "SARW",
    "SARL",
};

void cpu_x86_dump_state(CPUX86State *env, FILE *f, int flags)
{
    int eflags, i;
    char cc_op_name[32];
    static const char *seg_name[6] = { "ES", "CS", "SS", "DS", "FS", "GS" };

    eflags = env->eflags;
    fprintf(f, "EAX=%08x EBX=%08x ECX=%08x EDX=%08x\n"
            "ESI=%08x EDI=%08x EBP=%08x ESP=%08x\n"
            "EIP=%08x EFL=%08x [%c%c%c%c%c%c%c] CPL=%d II=%d\n",
            env->regs[R_EAX], env->regs[R_EBX], env->regs[R_ECX], env->regs[R_EDX],
            env->regs[R_ESI], env->regs[R_EDI], env->regs[R_EBP], env->regs[R_ESP],
            env->eip, eflags,
            eflags & DF_MASK ? 'D' : '-',
            eflags & CC_O ? 'O' : '-',
            eflags & CC_S ? 'S' : '-',
            eflags & CC_Z ? 'Z' : '-',
            eflags & CC_A ? 'A' : '-',
            eflags & CC_P ? 'P' : '-',
            eflags & CC_C ? 'C' : '-',
            env->hflags & HF_CPL_MASK,
            (env->hflags >> HF_INHIBIT_IRQ_SHIFT) & 1);
    for(i = 0; i < 6; i++) {
        SegmentCache *sc = &env->segs[i];
        fprintf(f, "%s =%04x %08x %08x %08x\n",
                seg_name[i],
                sc->selector,
                (int)sc->base,
                sc->limit,
                sc->flags);
    }
    fprintf(f, "LDT=%04x %08x %08x %08x\n",
            env->ldt.selector,
            (int)env->ldt.base,
            env->ldt.limit,
            env->ldt.flags);
    fprintf(f, "TR =%04x %08x %08x %08x\n",
            env->tr.selector,
            (int)env->tr.base,
            env->tr.limit,
            env->tr.flags);
    fprintf(f, "GDT=     %08x %08x\n",
            (int)env->gdt.base, env->gdt.limit);
    fprintf(f, "IDT=     %08x %08x\n",
            (int)env->idt.base, env->idt.limit);
    fprintf(f, "CR0=%08x CR2=%08x CR3=%08x CR4=%08x\n",
            env->cr[0], env->cr[2], env->cr[3], env->cr[4]);

    if (flags & X86_DUMP_CCOP) {
        if ((unsigned)env->cc_op < CC_OP_NB)
            strcpy(cc_op_name, cc_op_str[env->cc_op]);
        else
            snprintf(cc_op_name, sizeof(cc_op_name), "[%d]", env->cc_op);
        fprintf(f, "CCS=%08x CCD=%08x CCO=%-8s\n",
                env->cc_src, env->cc_dst, cc_op_name);
    }
    if (flags & X86_DUMP_FPU) {
        fprintf(f, "ST0=%f ST1=%f ST2=%f ST3=%f\n",
                (double)env->fpregs[0],
                (double)env->fpregs[1],
                (double)env->fpregs[2],
                (double)env->fpregs[3]);
222 fprintf(f, "ST4=%f ST5=%f ST6=%f ST7=%f\n",
223 (double)env->fpregs[4],
224 (double)env->fpregs[5],
225 (double)env->fpregs[7],
226 (double)env->fpregs[8]);
    }
}

/***********************************************************/
/* x86 mmu */
/* XXX: add PGE support */

void cpu_x86_set_a20(CPUX86State *env, int a20_state)
{
    a20_state = (a20_state != 0);
    if (a20_state != ((env->a20_mask >> 20) & 1)) {
#if defined(DEBUG_MMU)
        printf("A20 update: a20=%d\n", a20_state);
#endif
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        cpu_interrupt(env, CPU_INTERRUPT_EXITTB);

        /* when a20 is changed, all the MMU mappings are invalid, so
           we must flush everything */
        tlb_flush(env, 1);
        env->a20_mask = 0xffefffff | (a20_state << 20);
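        /* With A20 disabled the mask is 0xffefffff, so bit 20 of every
           physical address is forced to 0: an access to 0x00100000 wraps
           to 0x00000000, mimicking the classic 8086 1MB wrap-around. */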
    }
}

void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0)
{
    int pe_state;

#if defined(DEBUG_MMU)
    printf("CR0 update: CR0=0x%08x\n", new_cr0);
#endif
    if ((new_cr0 & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK)) !=
        (env->cr[0] & (CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK))) {
        tlb_flush(env, 1);
    }
    env->cr[0] = new_cr0;

    /* update PE flag in hidden flags */
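    /* CR0.PE is bit 0, so its value can be shifted directly into the HF_PE
       bit of hflags; ADDSEG is forced on in real mode because segment bases
       always take part in address generation there. */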
    pe_state = (env->cr[0] & CR0_PE_MASK);
    env->hflags = (env->hflags & ~HF_PE_MASK) | (pe_state << HF_PE_SHIFT);
    /* ensure that ADDSEG is always set in real mode */
    env->hflags |= ((pe_state ^ 1) << HF_ADDSEG_SHIFT);
}

void cpu_x86_update_cr3(CPUX86State *env, uint32_t new_cr3)
{
    env->cr[3] = new_cr3;
    if (env->cr[0] & CR0_PG_MASK) {
#if defined(DEBUG_MMU)
        printf("CR3 update: CR3=%08x\n", new_cr3);
#endif
        tlb_flush(env, 0);
    }
}

void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
#if defined(DEBUG_MMU)
286 printf("CR4 update: CR4=%08x\n", env->cr[4]);
#endif
    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
        tlb_flush(env, 1);
    }
    env->cr[4] = new_cr4;
}

/* XXX: also flush 4MB pages */
void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr)
{
    tlb_flush_page(env, addr);
}

/* return value:
   -1 = cannot handle fault
   0 = nothing more to do
   1 = generate PF fault
   2 = soft MMU activation required for this block
*/
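/* On a return value of 1 the caller is expected to raise the #PF exception
   itself; this function only latches the faulting address in CR2 and builds
   the P/W/U error code for it. */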
int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr,
                             int is_write, int is_user, int is_softmmu)
{
    uint8_t *pde_ptr, *pte_ptr;
    uint32_t pde, pte, virt_addr, ptep;
    int error_code, is_dirty, prot, page_size, ret;
    unsigned long paddr, vaddr, page_offset;

#if defined(DEBUG_MMU)
    printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n",
           addr, is_write, is_user, env->eip);
#endif

    if (env->user_mode_only) {
        /* user mode only emulation */
        error_code = 0;
        goto do_fault;
    }

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        virt_addr = addr & TARGET_PAGE_MASK;
        prot = PROT_READ | PROT_WRITE;
        page_size = 4096;
        goto do_mapping;
    }

    /* page directory entry */
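    /* (addr >> 20) & ~3 is the directory index (addr >> 22) scaled by the
       4-byte entry size, i.e. ((addr >> 22) & 0x3ff) << 2; the page table
       lookup below uses the same trick. */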
    pde_ptr = phys_ram_base +
        (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask);
    pde = ldl_raw(pde_ptr);
    if (!(pde & PG_PRESENT_MASK)) {
        error_code = 0;
        goto do_fault;
    }
    /* if PSE bit is set, then we use a 4MB page */
    if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
        if (is_user) {
            if (!(pde & PG_USER_MASK))
                goto do_fault_protect;
            if (is_write && !(pde & PG_RW_MASK))
                goto do_fault_protect;
        } else {
            if ((env->cr[0] & CR0_WP_MASK) &&
                is_write && !(pde & PG_RW_MASK))
                goto do_fault_protect;
        }
        is_dirty = is_write && !(pde & PG_DIRTY_MASK);
        if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
            pde |= PG_ACCESSED_MASK;
            if (is_dirty)
                pde |= PG_DIRTY_MASK;
            stl_raw(pde_ptr, pde);
        }

        pte = pde & ~0x003ff000; /* align to 4MB */
        ptep = pte;
        page_size = 4096 * 1024;
        virt_addr = addr & ~0x003fffff;
    } else {
        if (!(pde & PG_ACCESSED_MASK)) {
            pde |= PG_ACCESSED_MASK;
            stl_raw(pde_ptr, pde);
        }

        /* page table entry */
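        /* (addr >> 10) & 0xffc is the table index (addr >> 12) & 0x3ff
           scaled by the 4-byte entry size. */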
        pte_ptr = phys_ram_base +
            (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask);
        pte = ldl_raw(pte_ptr);
        if (!(pte & PG_PRESENT_MASK)) {
            error_code = 0;
            goto do_fault;
        }
        /* combine pde and pte user and rw protections */
        ptep = pte & pde;
        if (is_user) {
            if (!(ptep & PG_USER_MASK))
                goto do_fault_protect;
            if (is_write && !(ptep & PG_RW_MASK))
                goto do_fault_protect;
        } else {
            if ((env->cr[0] & CR0_WP_MASK) &&
                is_write && !(ptep & PG_RW_MASK))
                goto do_fault_protect;
        }
        is_dirty = is_write && !(pte & PG_DIRTY_MASK);
        if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
            pte |= PG_ACCESSED_MASK;
            if (is_dirty)
                pte |= PG_DIRTY_MASK;
            stl_raw(pte_ptr, pte);
        }
        page_size = 4096;
        virt_addr = addr & ~0xfff;
    }

    /* the page can be put in the TLB */
    prot = PROT_READ;
    if (pte & PG_DIRTY_MASK) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
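        /* A clean page is mapped read-only even when writable: the first
           write then faults back into this handler, which sets
           PG_DIRTY_MASK above and remaps the page with write access. */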
        if (is_user) {
            if (ptep & PG_RW_MASK)
                prot |= PROT_WRITE;
        } else {
            if (!(env->cr[0] & CR0_WP_MASK) ||
                (ptep & PG_RW_MASK))
                prot |= PROT_WRITE;
        }
    }

 do_mapping:
    pte = pte & env->a20_mask;

    /* Even if 4MB pages, we map only one 4KB page in the cache to
       avoid filling it too fast */
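    /* For a 4KB page this offset is 0; for a 4MB page it keeps bits 12-21
       of the address, so each 4KB slice of the large page gets its own
       TLB entry on demand. */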
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    vaddr = virt_addr + page_offset;

    ret = tlb_set_page(env, vaddr, paddr, prot, is_user, is_softmmu);
    return ret;
 do_fault_protect:
    error_code = PG_ERROR_P_MASK;
 do_fault:
    env->cr[2] = addr;
    env->error_code = (is_write << PG_ERROR_W_BIT) | error_code;
    if (is_user)
        env->error_code |= PG_ERROR_U_MASK;
    return 1;
}

#if defined(CONFIG_USER_ONLY)
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint8_t *pde_ptr, *pte_ptr;
    uint32_t pde, pte, paddr, page_offset, page_size;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        pte = addr;
        page_size = 4096;
    } else {
        /* page directory entry */
        pde_ptr = phys_ram_base +
            (((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3)) & env->a20_mask);
        pde = ldl_raw(pde_ptr);
        if (!(pde & PG_PRESENT_MASK))
            return -1;
        if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            pte = pde & ~0x003ff000; /* align to 4MB */
            page_size = 4096 * 1024;
        } else {
            /* page table entry */
            pte_ptr = phys_ram_base +
                (((pde & ~0xfff) + ((addr >> 10) & 0xffc)) & env->a20_mask);
            pte = ldl_raw(pte_ptr);
            if (!(pte & PG_PRESENT_MASK))
                return -1;
            page_size = 4096;
        }
    }
    pte = pte & env->a20_mask;
    page_offset = (addr & TARGET_PAGE_MASK) & (page_size - 1);
    paddr = (pte & TARGET_PAGE_MASK) + page_offset;
    return paddr;
}
#endif