/*
 * i386 emulator main execution loop
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "exec-i386.h"
#include "disas.h"

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

/* main execution loop */

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

/* exception support */
/* NOTE: not static, to force relocation generation by GCC */
void raise_exception_err(int exception_index, int error_code)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
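    /* when reg_XXX is defined, that target register is kept in a fixed
       host register and must be written back to the env structure here
       before the longjmp */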
#ifdef __sparc__
    /* We have to stay in the same register window as our caller,
     * thus this trick.
     */
    __asm__ __volatile__("restore\n\t"
                         "mov\t%o0, %i0");
#endif
#ifdef reg_EAX
    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_ECX
    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_EBX
    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ESP
    env->regs[R_ESP] = ESP;
#endif
#ifdef reg_EBP
    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESI
    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
    env->regs[R_EDI] = EDI;
#endif
    env->exception_index = exception_index;
    env->error_code = error_code;
    longjmp(env->jmp_env, 1);
}

/* shortcut when error_code is 0 or not present */
void raise_exception(int exception_index)
{
    raise_exception_err(exception_index, 0);
}

int cpu_x86_exec(CPUX86State *env1)
{
    int saved_T0, saved_T1, saved_A0;
    CPUX86State *saved_env;
#ifdef reg_EAX
    int saved_EAX;
#endif
#ifdef reg_ECX
    int saved_ECX;
#endif
#ifdef reg_EDX
    int saved_EDX;
#endif
#ifdef reg_EBX
    int saved_EBX;
#endif
#ifdef reg_ESP
    int saved_ESP;
#endif
#ifdef reg_EBP
    int saved_EBP;
#endif
#ifdef reg_ESI
    int saved_ESI;
#endif
#ifdef reg_EDI
    int saved_EDI;
#endif
    int code_gen_size, ret, code_size;
    void (*gen_func)(void);
    TranslationBlock *tb, **ptb;
    uint8_t *tc_ptr, *cs_base, *pc;
    unsigned int flags;

    /* first we save global registers */
    saved_T0 = T0;
    saved_T1 = T1;
    saved_A0 = A0;
    saved_env = env;
    env = env1;
#ifdef reg_EAX
    saved_EAX = EAX;
    EAX = env->regs[R_EAX];
#endif
#ifdef reg_ECX
    saved_ECX = ECX;
    ECX = env->regs[R_ECX];
#endif
#ifdef reg_EDX
    saved_EDX = EDX;
    EDX = env->regs[R_EDX];
#endif
#ifdef reg_EBX
    saved_EBX = EBX;
    EBX = env->regs[R_EBX];
#endif
#ifdef reg_ESP
    saved_ESP = ESP;
    ESP = env->regs[R_ESP];
#endif
#ifdef reg_EBP
    saved_EBP = EBP;
    EBP = env->regs[R_EBP];
#endif
#ifdef reg_ESI
    saved_ESI = ESI;
    ESI = env->regs[R_ESI];
#endif
#ifdef reg_EDI
    saved_EDI = EDI;
    EDI = env->regs[R_EDI];
#endif

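    /* the condition codes are evaluated lazily: CC_SRC and CC_OP record
       enough state to recompute the flags only when needed, and DF is
       kept as +1/-1 so the string instructions can add it directly to
       the index registers */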
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    env->interrupt_request = 0;

    /* prepare setjmp context for exception handling */
    if (setjmp(env->jmp_env) == 0) {
        T0 = 0; /* force lookup of first TB */
        for(;;) {
            if (env->interrupt_request) {
                raise_exception(EXCP_INTERRUPT);
            }
#ifdef DEBUG_EXEC
            if (loglevel) {
                /* XXX: save all volatile state in cpu state */
                /* restore flags in standard format */
                env->regs[R_EAX] = EAX;
                env->regs[R_EBX] = EBX;
                env->regs[R_ECX] = ECX;
                env->regs[R_EDX] = EDX;
                env->regs[R_ESI] = ESI;
                env->regs[R_EDI] = EDI;
                env->regs[R_EBP] = EBP;
                env->regs[R_ESP] = ESP;
                env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);
                cpu_x86_dump_state(env, logfile, 0);
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
            }
#endif
            /* we record the current CPU state in 'flags'; it is assumed
               not to change during the whole translated block */
            flags = env->seg_cache[R_CS].seg_32bit << GEN_FLAG_CODE32_SHIFT;
            flags |= env->seg_cache[R_SS].seg_32bit << GEN_FLAG_SS32_SHIFT;
            flags |= (((unsigned long)env->seg_cache[R_DS].base |
                       (unsigned long)env->seg_cache[R_ES].base |
                       (unsigned long)env->seg_cache[R_SS].base) != 0) <<
                GEN_FLAG_ADDSEG_SHIFT;
            if (!(env->eflags & VM_MASK)) {
                flags |= (env->segs[R_CS] & 3) << GEN_FLAG_CPL_SHIFT;
            } else {
                /* NOTE: a dummy CPL of 3 is kept in VM86 mode */
                flags |= (1 << GEN_FLAG_VM_SHIFT);
                flags |= (3 << GEN_FLAG_CPL_SHIFT);
            }
            flags |= (env->eflags & (IOPL_MASK | TF_MASK));
            cs_base = env->seg_cache[R_CS].base;
            pc = cs_base + env->eip;
            tb = tb_find(&ptb, (unsigned long)pc, (unsigned long)cs_base,
                         flags);
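            /* the TB is looked up by (pc, cs_base, flags), so a block is
               only reused when it was translated under the same segment,
               CPL and mode assumptions */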
            if (!tb) {
                spin_lock(&tb_lock);
                /* if no translated code available, then translate it now */
                tb = tb_alloc((unsigned long)pc);
                if (!tb) {
                    /* flush must be done */
                    tb_flush();
                    /* cannot fail at this point */
                    tb = tb_alloc((unsigned long)pc);
                    /* don't forget to invalidate previous TB info */
                    ptb = &tb_hash[tb_hash_func((unsigned long)pc)];
                    T0 = 0;
                }
                tc_ptr = code_gen_ptr;
                tb->tc_ptr = tc_ptr;
                ret = cpu_x86_gen_code(code_gen_ptr, CODE_GEN_MAX_SIZE,
                                       &code_gen_size, pc, cs_base, flags,
                                       &code_size, tb);
                /* if invalid instruction, signal it */
                if (ret != 0) {
                    /* NOTE: the tb is allocated but not linked, so we
                       can leave it */
                    spin_unlock(&tb_lock);
                    raise_exception(EXCP06_ILLOP);
                }
                *ptb = tb;
                tb->size = code_size;
                tb->cs_base = (unsigned long)cs_base;
                tb->flags = flags;
                tb->hash_next = NULL;
                tb_link(tb);
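                /* advance the generated code pointer, rounded up so the
                   next TB starts on a CODE_GEN_ALIGN boundary */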
                code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
                spin_unlock(&tb_lock);
            }
#ifdef DEBUG_EXEC
            if (loglevel) {
                fprintf(logfile, "Trace 0x%08lx [0x%08lx] %s\n",
                        (long)tb->tc_ptr, (long)tb->pc,
                        lookup_symbol((void *)tb->pc));
            }
#endif
            /* see if we can patch the calling TB */
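            /* T0 holds the address of the previous TB with the jump slot
               index in its low 2 bits; chaining is skipped while single
               stepping (TF set) so control always returns to this loop */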
            if (T0 != 0 && !(env->eflags & TF_MASK)) {
                spin_lock(&tb_lock);
                tb_add_jump((TranslationBlock *)(T0 & ~3), T0 & 3, tb);
                spin_unlock(&tb_lock);
            }

            tc_ptr = tb->tc_ptr;

            /* execute the generated code */
            gen_func = (void *)tc_ptr;
#ifdef __sparc__
            __asm__ __volatile__("call %0\n\t"
                                 " mov %%o7,%%i0"
                                 : /* no outputs */
                                 : "r" (gen_func)
                                 : "i0", "i1", "i2", "i3", "i4", "i5");
#else
            gen_func();
#endif
        }
    }
    ret = env->exception_index;

    /* restore flags in standard format */
    env->eflags = env->eflags | cc_table[CC_OP].compute_all() | (DF & DF_MASK);

    /* restore global registers */
#ifdef reg_EAX
    EAX = saved_EAX;
#endif
#ifdef reg_ECX
    ECX = saved_ECX;
#endif
#ifdef reg_EDX
    EDX = saved_EDX;
#endif
#ifdef reg_EBX
    EBX = saved_EBX;
#endif
#ifdef reg_ESP
    ESP = saved_ESP;
#endif
#ifdef reg_EBP
    EBP = saved_EBP;
#endif
#ifdef reg_ESI
    ESI = saved_ESI;
#endif
#ifdef reg_EDI
    EDI = saved_EDI;
#endif
    T0 = saved_T0;
    T1 = saved_T1;
    A0 = saved_A0;
    env = saved_env;
    return ret;
}

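/* request an asynchronous interrupt; the flag is polled at the top of
   the main execution loop */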
void cpu_x86_interrupt(CPUX86State *s)
{
    s->interrupt_request = 1;
}


void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    load_seg(seg_reg, selector);
    env = saved_env;
}

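/* the target register macros are undefined here because the host signal
   context headers included below use some of the same names (e.g. EIP in
   the glibc 2.1 ucontext definitions) */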
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#include <sys/ucontext.h>

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the signal
   set which should be restored */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set)
{
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx wr=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
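    /* if page_unprotect() can handle this write fault, returning 1 makes
       the host signal handler return and the faulting access is simply
       restarted */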
    if (is_write && page_unprotect(address)) {
        return 1;
    }
    if (pc >= (unsigned long)code_gen_buffer &&
        pc < (unsigned long)code_gen_buffer + CODE_GEN_BUFFER_SIZE) {
        /* the PC is inside the translated code: the fault was raised by
           the virtual CPU */
        /* we restore the process signal mask as sigreturn would have
           done it */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        /* XXX: need to compute virtual pc position by retranslating
           code. The rest of the CPU state should be correct. */
        env->cr2 = address;
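        /* build an x86 page fault error code: bit 2 = user mode access,
           bit 1 = write access */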
        raise_exception_err(EXCP0E_PAGE, 4 | (is_write << 1));
        /* never reached */
        return 1;
    } else {
        return 0;
    }
}

#if defined(__i386__)

int cpu_x86_signal_handler(int host_signum, struct siginfo *info,
                           void *puc)
{
    struct ucontext *uc = puc;
    unsigned long pc;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP EIP
#define REG_ERR ERR
#define REG_TRAPNO TRAPNO
#endif
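    /* trap number 0xe is the page fault vector; bit 1 of the error code
       is set when the faulting access was a write */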
    pc = uc->uc_mcontext.gregs[REG_EIP];
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             uc->uc_mcontext.gregs[REG_TRAPNO] == 0xe ?
                             (uc->uc_mcontext.gregs[REG_ERR] >> 1) & 1 : 0,
                             &uc->uc_sigmask);
}

#elif defined(__powerpc)

int cpu_x86_signal_handler(int host_signum, struct siginfo *info,
                           void *puc)
{
    struct ucontext *uc = puc;
    struct pt_regs *regs = uc->uc_mcontext.regs;
    unsigned long pc;
    int is_write;

    pc = regs->nip;
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (regs->dsisr & 0x00800000)
        is_write = 1;
#else
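    /* on a data access fault (trap 0x400 is an instruction fault),
       DSISR bit 0x02000000 is set when the access was a store */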
    if (regs->trap != 0x400 && (regs->dsisr & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask);
}

#else

#error CPU specific signal handler needed

#endif