]> git.proxmox.com Git - qemu.git/blame - target-i386/helper.c
fixed PPC state reloading
[qemu.git] / target-i386 / helper.c
CommitLineData
2c0262af
FB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "exec.h"
21
f3f2d9be
FB
22//#define DEBUG_PCALL
23
2c0262af
FB
24const uint8_t parity_table[256] = {
25 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
26 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
27 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
28 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
29 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
30 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
31 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
32 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
33 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
34 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
35 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
36 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
37 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
38 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
39 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
40 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
41 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
42 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
43 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
44 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
45 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
46 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
47 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
48 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
49 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
50 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
51 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
54 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
55 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
56 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
57};
58
/* Rotation count for 16-bit RCL/RCR: entry i is i modulo 17
   (16 data bits plus the carry flag form a 17-bit rotate). */
const uint8_t rclw_table[32] = {
     0,  1,  2,  3,  4,  5,  6,  7,
     8,  9, 10, 11, 12, 13, 14, 15,
    16,  0,  1,  2,  3,  4,  5,  6,
     7,  8,  9, 10, 11, 12, 13, 14,
};
66
/* Rotation count for 8-bit RCL/RCR: entry i is i modulo 9
   (8 data bits plus the carry flag form a 9-bit rotate). */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
74
75const CPU86_LDouble f15rk[7] =
76{
77 0.00000000000000000000L,
78 1.00000000000000000000L,
79 3.14159265358979323851L, /*pi*/
80 0.30102999566398119523L, /*lg2*/
81 0.69314718055994530943L, /*ln2*/
82 1.44269504088896340739L, /*l2e*/
83 3.32192809488736234781L, /*l2t*/
84};
85
86/* thread support */
87
88spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
89
90void cpu_lock(void)
91{
92 spin_lock(&global_cpu_lock);
93}
94
95void cpu_unlock(void)
96{
97 spin_unlock(&global_cpu_lock);
98}
99
100void cpu_loop_exit(void)
101{
102 /* NOTE: the register at this point must be saved by hand because
103 longjmp restore them */
104#ifdef reg_EAX
105 env->regs[R_EAX] = EAX;
106#endif
107#ifdef reg_ECX
108 env->regs[R_ECX] = ECX;
109#endif
110#ifdef reg_EDX
111 env->regs[R_EDX] = EDX;
112#endif
113#ifdef reg_EBX
114 env->regs[R_EBX] = EBX;
115#endif
116#ifdef reg_ESP
117 env->regs[R_ESP] = ESP;
118#endif
119#ifdef reg_EBP
120 env->regs[R_EBP] = EBP;
121#endif
122#ifdef reg_ESI
123 env->regs[R_ESI] = ESI;
124#endif
125#ifdef reg_EDI
126 env->regs[R_EDI] = EDI;
127#endif
128 longjmp(env->jmp_env, 1);
129}
130
7e84c249
FB
131/* return non zero if error */
132static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
133 int selector)
134{
135 SegmentCache *dt;
136 int index;
137 uint8_t *ptr;
138
139 if (selector & 0x4)
140 dt = &env->ldt;
141 else
142 dt = &env->gdt;
143 index = selector & ~7;
144 if ((index + 7) > dt->limit)
145 return -1;
146 ptr = dt->base + index;
147 *e1_ptr = ldl_kernel(ptr);
148 *e2_ptr = ldl_kernel(ptr + 4);
149 return 0;
150}
151
152static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
153{
154 unsigned int limit;
155 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
156 if (e2 & DESC_G_MASK)
157 limit = (limit << 12) | 0xfff;
158 return limit;
159}
160
/* Reassemble the 32-bit segment base scattered across descriptor words:
   bits 0-15 from e1[31:16], bits 16-23 from e2[7:0], bits 24-31 from
   e2[31:24]. */
static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base = (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
    return (uint8_t *)base;
}
165
166static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
167{
168 sc->base = get_seg_base(e1, e2);
169 sc->limit = get_seg_limit(e1, e2);
170 sc->flags = e2;
171}
172
173/* init the segment cache in vm86 mode. */
174static inline void load_seg_vm(int seg, int selector)
175{
176 selector &= 0xffff;
177 cpu_x86_load_seg_cache(env, seg, selector,
178 (uint8_t *)(selector << 4), 0xffff, 0);
179}
180
2c0262af
FB
181static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
182 uint32_t *esp_ptr, int dpl)
183{
184 int type, index, shift;
185
186#if 0
187 {
188 int i;
189 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
190 for(i=0;i<env->tr.limit;i++) {
191 printf("%02x ", env->tr.base[i]);
192 if ((i & 7) == 7) printf("\n");
193 }
194 printf("\n");
195 }
196#endif
197
198 if (!(env->tr.flags & DESC_P_MASK))
199 cpu_abort(env, "invalid tss");
200 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
201 if ((type & 7) != 1)
202 cpu_abort(env, "invalid tss type");
203 shift = type >> 3;
204 index = (dpl * 4 + 2) << shift;
205 if (index + (4 << shift) - 1 > env->tr.limit)
206 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
207 if (shift == 0) {
61382a50
FB
208 *esp_ptr = lduw_kernel(env->tr.base + index);
209 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
2c0262af 210 } else {
61382a50
FB
211 *esp_ptr = ldl_kernel(env->tr.base + index);
212 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
2c0262af
FB
213 }
214}
215
7e84c249
FB
216/* XXX: merge with load_seg() */
217static void tss_load_seg(int seg_reg, int selector)
218{
219 uint32_t e1, e2;
220 int rpl, dpl, cpl;
221
222 if ((selector & 0xfffc) != 0) {
223 if (load_segment(&e1, &e2, selector) != 0)
224 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
225 if (!(e2 & DESC_S_MASK))
226 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
227 rpl = selector & 3;
228 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
229 cpl = env->hflags & HF_CPL_MASK;
230 if (seg_reg == R_CS) {
231 if (!(e2 & DESC_CS_MASK))
232 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
233 if (dpl != rpl)
234 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
235 if ((e2 & DESC_C_MASK) && dpl > rpl)
236 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
237
238 } else if (seg_reg == R_SS) {
239 /* SS must be writable data */
240 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
241 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
242 if (dpl != cpl || dpl != rpl)
243 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
244 } else {
245 /* not readable code */
246 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
247 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
248 /* if data or non conforming code, checks the rights */
249 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
250 if (dpl < cpl || dpl < rpl)
251 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
252 }
253 }
254 if (!(e2 & DESC_P_MASK))
255 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
256 cpu_x86_load_seg_cache(env, seg_reg, selector,
257 get_seg_base(e1, e2),
258 get_seg_limit(e1, e2),
259 e2);
260 } else {
261 if (seg_reg == R_SS || seg_reg == R_CS)
262 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
263 }
264}
265
266#define SWITCH_TSS_JMP 0
267#define SWITCH_TSS_IRET 1
268#define SWITCH_TSS_CALL 2
269
270/* XXX: restore CPU state in registers (PowerPC case) */
271static void switch_tss(int tss_selector,
272 uint32_t e1, uint32_t e2, int source)
2c0262af 273{
7e84c249
FB
274 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
275 uint8_t *tss_base;
276 uint32_t new_regs[8], new_segs[6];
277 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
278 uint32_t old_eflags, eflags_mask;
2c0262af
FB
279 SegmentCache *dt;
280 int index;
281 uint8_t *ptr;
282
7e84c249
FB
283 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
284
285 /* if task gate, we read the TSS segment and we load it */
286 if (type == 5) {
287 if (!(e2 & DESC_P_MASK))
288 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
289 tss_selector = e1 >> 16;
290 if (tss_selector & 4)
291 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
292 if (load_segment(&e1, &e2, tss_selector) != 0)
293 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
294 if (e2 & DESC_S_MASK)
295 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
296 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
297 if ((type & 7) != 1)
298 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
299 }
300
301 if (!(e2 & DESC_P_MASK))
302 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
303
304 if (type & 8)
305 tss_limit_max = 103;
2c0262af 306 else
7e84c249
FB
307 tss_limit_max = 43;
308 tss_limit = get_seg_limit(e1, e2);
309 tss_base = get_seg_base(e1, e2);
310 if ((tss_selector & 4) != 0 ||
311 tss_limit < tss_limit_max)
312 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
313 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
314 if (old_type & 8)
315 old_tss_limit_max = 103;
316 else
317 old_tss_limit_max = 43;
318
319 /* read all the registers from the new TSS */
320 if (type & 8) {
321 /* 32 bit */
322 new_cr3 = ldl_kernel(tss_base + 0x1c);
323 new_eip = ldl_kernel(tss_base + 0x20);
324 new_eflags = ldl_kernel(tss_base + 0x24);
325 for(i = 0; i < 8; i++)
326 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
327 for(i = 0; i < 6; i++)
328 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
329 new_ldt = lduw_kernel(tss_base + 0x60);
330 new_trap = ldl_kernel(tss_base + 0x64);
331 } else {
332 /* 16 bit */
333 new_cr3 = 0;
334 new_eip = lduw_kernel(tss_base + 0x0e);
335 new_eflags = lduw_kernel(tss_base + 0x10);
336 for(i = 0; i < 8; i++)
337 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
338 for(i = 0; i < 4; i++)
339 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
340 new_ldt = lduw_kernel(tss_base + 0x2a);
341 new_segs[R_FS] = 0;
342 new_segs[R_GS] = 0;
343 new_trap = 0;
344 }
345
346 /* NOTE: we must avoid memory exceptions during the task switch,
347 so we make dummy accesses before */
348 /* XXX: it can still fail in some cases, so a bigger hack is
349 necessary to valid the TLB after having done the accesses */
350
351 v1 = ldub_kernel(env->tr.base);
352 v2 = ldub(env->tr.base + old_tss_limit_max);
353 stb_kernel(env->tr.base, v1);
354 stb_kernel(env->tr.base + old_tss_limit_max, v2);
355
356 /* clear busy bit (it is restartable) */
357 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
358 uint8_t *ptr;
359 uint32_t e2;
360 ptr = env->gdt.base + (env->tr.selector << 3);
361 e2 = ldl_kernel(ptr + 4);
362 e2 &= ~DESC_TSS_BUSY_MASK;
363 stl_kernel(ptr + 4, e2);
364 }
365 old_eflags = compute_eflags();
366 if (source == SWITCH_TSS_IRET)
367 old_eflags &= ~NT_MASK;
368
369 /* save the current state in the old TSS */
370 if (type & 8) {
371 /* 32 bit */
372 stl_kernel(env->tr.base + 0x20, env->eip);
373 stl_kernel(env->tr.base + 0x24, old_eflags);
374 for(i = 0; i < 8; i++)
375 stl_kernel(env->tr.base + (0x28 + i * 4), env->regs[i]);
376 for(i = 0; i < 6; i++)
377 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
378 } else {
379 /* 16 bit */
380 stw_kernel(env->tr.base + 0x0e, new_eip);
381 stw_kernel(env->tr.base + 0x10, old_eflags);
382 for(i = 0; i < 8; i++)
383 stw_kernel(env->tr.base + (0x12 + i * 2), env->regs[i]);
384 for(i = 0; i < 4; i++)
385 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
386 }
387
388 /* now if an exception occurs, it will occurs in the next task
389 context */
390
391 if (source == SWITCH_TSS_CALL) {
392 stw_kernel(tss_base, env->tr.selector);
393 new_eflags |= NT_MASK;
394 }
395
396 /* set busy bit */
397 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
398 uint8_t *ptr;
399 uint32_t e2;
400 ptr = env->gdt.base + (tss_selector << 3);
401 e2 = ldl_kernel(ptr + 4);
402 e2 |= DESC_TSS_BUSY_MASK;
403 stl_kernel(ptr + 4, e2);
404 }
405
406 /* set the new CPU state */
407 /* from this point, any exception which occurs can give problems */
408 env->cr[0] |= CR0_TS_MASK;
409 env->tr.selector = tss_selector;
410 env->tr.base = tss_base;
411 env->tr.limit = tss_limit;
412 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
413
414 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
415 env->cr[3] = new_cr3;
416 cpu_x86_update_cr3(env);
417 }
418
419 /* load all registers without an exception, then reload them with
420 possible exception */
421 env->eip = new_eip;
422 eflags_mask = FL_UPDATE_CPL0_MASK;
423 if (!(type & 8))
424 eflags_mask &= 0xffff;
425 load_eflags(new_eflags, eflags_mask);
426 for(i = 0; i < 8; i++)
427 env->regs[i] = new_regs[i];
428 if (new_eflags & VM_MASK) {
429 for(i = 0; i < 6; i++)
430 load_seg_vm(i, new_segs[i]);
431 /* in vm86, CPL is always 3 */
432 cpu_x86_set_cpl(env, 3);
433 } else {
434 /* CPL is set the RPL of CS */
435 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
436 /* first just selectors as the rest may trigger exceptions */
437 for(i = 0; i < 6; i++)
438 cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0);
439 }
440
441 env->ldt.selector = new_ldt & ~4;
442 env->ldt.base = NULL;
443 env->ldt.limit = 0;
444 env->ldt.flags = 0;
445
446 /* load the LDT */
447 if (new_ldt & 4)
448 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
449
450 dt = &env->gdt;
451 index = new_ldt & ~7;
2c0262af 452 if ((index + 7) > dt->limit)
7e84c249 453 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
2c0262af 454 ptr = dt->base + index;
7e84c249
FB
455 e1 = ldl_kernel(ptr);
456 e2 = ldl_kernel(ptr + 4);
457 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
458 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
459 if (!(e2 & DESC_P_MASK))
460 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
461 load_seg_cache_raw_dt(&env->ldt, e1, e2);
462
463 /* load the segments */
464 if (!(new_eflags & VM_MASK)) {
465 tss_load_seg(R_CS, new_segs[R_CS]);
466 tss_load_seg(R_SS, new_segs[R_SS]);
467 tss_load_seg(R_ES, new_segs[R_ES]);
468 tss_load_seg(R_DS, new_segs[R_DS]);
469 tss_load_seg(R_FS, new_segs[R_FS]);
470 tss_load_seg(R_GS, new_segs[R_GS]);
471 }
472
473 /* check that EIP is in the CS segment limits */
474 if (new_eip > env->segs[R_CS].limit) {
475 raise_exception_err(EXCP0D_GPF, 0);
476 }
2c0262af 477}
7e84c249
FB
478
479/* check if Port I/O is allowed in TSS */
480static inline void check_io(int addr, int size)
2c0262af 481{
7e84c249
FB
482 int io_offset, val, mask;
483
484 /* TSS must be a valid 32 bit one */
485 if (!(env->tr.flags & DESC_P_MASK) ||
486 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
487 env->tr.limit < 103)
488 goto fail;
489 io_offset = lduw_kernel(env->tr.base + 0x66);
490 io_offset += (addr >> 3);
491 /* Note: the check needs two bytes */
492 if ((io_offset + 1) > env->tr.limit)
493 goto fail;
494 val = lduw_kernel(env->tr.base + io_offset);
495 val >>= (addr & 7);
496 mask = (1 << size) - 1;
497 /* all bits must be zero to allow the I/O */
498 if ((val & mask) != 0) {
499 fail:
500 raise_exception_err(EXCP0D_GPF, 0);
501 }
2c0262af
FB
502}
503
7e84c249 504void check_iob_T0(void)
2c0262af 505{
7e84c249 506 check_io(T0, 1);
2c0262af
FB
507}
508
7e84c249 509void check_iow_T0(void)
2c0262af 510{
7e84c249 511 check_io(T0, 2);
2c0262af
FB
512}
513
7e84c249 514void check_iol_T0(void)
2c0262af 515{
7e84c249
FB
516 check_io(T0, 4);
517}
518
519void check_iob_DX(void)
520{
521 check_io(EDX & 0xffff, 1);
522}
523
524void check_iow_DX(void)
525{
526 check_io(EDX & 0xffff, 2);
527}
528
529void check_iol_DX(void)
530{
531 check_io(EDX & 0xffff, 4);
2c0262af
FB
532}
533
891b38e4
FB
534static inline unsigned int get_sp_mask(unsigned int e2)
535{
536 if (e2 & DESC_B_MASK)
537 return 0xffffffff;
538 else
539 return 0xffff;
540}
541
/* XXX: add a is_user flag to have proper security support */
/* Stack push/pop primitives.  'sp' is a local copy of the stack pointer
   and is updated in place; every memory access is masked with sp_mask so
   a 16-bit stack wraps within its 64K segment. */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
566
2c0262af
FB
567/* protected mode interrupt */
568static void do_interrupt_protected(int intno, int is_int, int error_code,
569 unsigned int next_eip, int is_hw)
570{
571 SegmentCache *dt;
572 uint8_t *ptr, *ssp;
891b38e4 573 int type, dpl, selector, ss_dpl, cpl, sp_mask;
2c0262af 574 int has_error_code, new_stack, shift;
891b38e4
FB
575 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
576 uint32_t old_eip;
2c0262af 577
f3f2d9be
FB
578#ifdef DEBUG_PCALL
579 if (loglevel) {
580 static int count;
581 fprintf(logfile, "%d: interrupt: vector=%02x error_code=%04x int=%d CS:IP=%04x:%08x CPL=%d\n",
582 count, intno, error_code, is_int, env->segs[R_CS].selector, env->eip, env->hflags & 3);
583#if 0
584 {
585 int i;
586 uint8_t *ptr;
587 printf(" code=");
588 ptr = env->segs[R_CS].base + env->eip;
589 for(i = 0; i < 16; i++) {
590 printf(" %02x", ldub(ptr + i));
591 }
592 printf("\n");
593 }
594#endif
595 count++;
596 }
597#endif
598
7e84c249
FB
599 has_error_code = 0;
600 if (!is_int && !is_hw) {
601 switch(intno) {
602 case 8:
603 case 10:
604 case 11:
605 case 12:
606 case 13:
607 case 14:
608 case 17:
609 has_error_code = 1;
610 break;
611 }
612 }
613
2c0262af
FB
614 dt = &env->idt;
615 if (intno * 8 + 7 > dt->limit)
616 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
617 ptr = dt->base + intno * 8;
61382a50
FB
618 e1 = ldl_kernel(ptr);
619 e2 = ldl_kernel(ptr + 4);
2c0262af
FB
620 /* check gate type */
621 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
622 switch(type) {
623 case 5: /* task gate */
7e84c249
FB
624 /* must do that check here to return the correct error code */
625 if (!(e2 & DESC_P_MASK))
626 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
627 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL);
628 if (has_error_code) {
629 int mask;
630 /* push the error code */
631 shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
632 if (env->segs[R_SS].flags & DESC_B_MASK)
633 mask = 0xffffffff;
634 else
635 mask = 0xffff;
636 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
637 ssp = env->segs[R_SS].base + esp;
638 if (shift)
639 stl_kernel(ssp, error_code);
640 else
641 stw_kernel(ssp, error_code);
642 env->regs[R_ESP] = (esp & mask) | (env->regs[R_ESP] & ~mask);
643 }
644 return;
2c0262af
FB
645 case 6: /* 286 interrupt gate */
646 case 7: /* 286 trap gate */
647 case 14: /* 386 interrupt gate */
648 case 15: /* 386 trap gate */
649 break;
650 default:
651 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
652 break;
653 }
654 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
655 cpl = env->hflags & HF_CPL_MASK;
656 /* check privledge if software int */
657 if (is_int && dpl < cpl)
658 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
659 /* check valid bit */
660 if (!(e2 & DESC_P_MASK))
661 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
662 selector = e1 >> 16;
663 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
664 if ((selector & 0xfffc) == 0)
665 raise_exception_err(EXCP0D_GPF, 0);
666
667 if (load_segment(&e1, &e2, selector) != 0)
668 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
669 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
670 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
671 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
672 if (dpl > cpl)
673 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
674 if (!(e2 & DESC_P_MASK))
675 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
676 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
677 /* to inner priviledge */
678 get_ss_esp_from_tss(&ss, &esp, dpl);
679 if ((ss & 0xfffc) == 0)
680 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
681 if ((ss & 3) != dpl)
682 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
683 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
684 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
685 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
686 if (ss_dpl != dpl)
687 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
688 if (!(ss_e2 & DESC_S_MASK) ||
689 (ss_e2 & DESC_CS_MASK) ||
690 !(ss_e2 & DESC_W_MASK))
691 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
692 if (!(ss_e2 & DESC_P_MASK))
693 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
694 new_stack = 1;
891b38e4
FB
695 sp_mask = get_sp_mask(ss_e2);
696 ssp = get_seg_base(ss_e1, ss_e2);
2c0262af
FB
697 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
698 /* to same priviledge */
699 new_stack = 0;
891b38e4
FB
700 sp_mask = get_sp_mask(env->segs[R_SS].flags);
701 ssp = env->segs[R_SS].base;
702 esp = ESP;
2c0262af
FB
703 } else {
704 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
705 new_stack = 0; /* avoid warning */
891b38e4
FB
706 sp_mask = 0; /* avoid warning */
707 ssp = NULL; /* avoid warning */
708 esp = 0; /* avoid warning */
2c0262af
FB
709 }
710
711 shift = type >> 3;
891b38e4
FB
712
713#if 0
714 /* XXX: check that enough room is available */
2c0262af
FB
715 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
716 if (env->eflags & VM_MASK)
717 push_size += 8;
718 push_size <<= shift;
891b38e4 719#endif
2c0262af
FB
720 if (is_int)
721 old_eip = next_eip;
722 else
723 old_eip = env->eip;
2c0262af 724 if (shift == 1) {
2c0262af 725 if (env->eflags & VM_MASK) {
891b38e4
FB
726 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
727 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
728 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
729 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
2c0262af
FB
730 }
731 if (new_stack) {
891b38e4
FB
732 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
733 PUSHL(ssp, esp, sp_mask, ESP);
2c0262af 734 }
891b38e4
FB
735 PUSHL(ssp, esp, sp_mask, compute_eflags());
736 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
737 PUSHL(ssp, esp, sp_mask, old_eip);
2c0262af 738 if (has_error_code) {
891b38e4 739 PUSHL(ssp, esp, sp_mask, error_code);
2c0262af
FB
740 }
741 } else {
742 if (new_stack) {
891b38e4
FB
743 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
744 PUSHW(ssp, esp, sp_mask, ESP);
2c0262af 745 }
891b38e4
FB
746 PUSHW(ssp, esp, sp_mask, compute_eflags());
747 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
748 PUSHW(ssp, esp, sp_mask, old_eip);
2c0262af 749 if (has_error_code) {
891b38e4 750 PUSHW(ssp, esp, sp_mask, error_code);
2c0262af
FB
751 }
752 }
753
891b38e4
FB
754 if (new_stack) {
755 ss = (ss & ~3) | dpl;
756 cpu_x86_load_seg_cache(env, R_SS, ss,
757 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
758 }
759 ESP = (ESP & ~sp_mask) | (esp & sp_mask);
760
761 selector = (selector & ~3) | dpl;
762 cpu_x86_load_seg_cache(env, R_CS, selector,
763 get_seg_base(e1, e2),
764 get_seg_limit(e1, e2),
765 e2);
766 cpu_x86_set_cpl(env, dpl);
767 env->eip = offset;
768
2c0262af
FB
769 /* interrupt gate clear IF mask */
770 if ((type & 1) == 0) {
771 env->eflags &= ~IF_MASK;
772 }
773 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
774}
775
776/* real mode interrupt */
777static void do_interrupt_real(int intno, int is_int, int error_code,
778 unsigned int next_eip)
779{
780 SegmentCache *dt;
781 uint8_t *ptr, *ssp;
782 int selector;
783 uint32_t offset, esp;
784 uint32_t old_cs, old_eip;
785
786 /* real mode (simpler !) */
787 dt = &env->idt;
788 if (intno * 4 + 3 > dt->limit)
789 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
790 ptr = dt->base + intno * 4;
61382a50
FB
791 offset = lduw_kernel(ptr);
792 selector = lduw_kernel(ptr + 2);
2c0262af
FB
793 esp = ESP;
794 ssp = env->segs[R_SS].base;
795 if (is_int)
796 old_eip = next_eip;
797 else
798 old_eip = env->eip;
799 old_cs = env->segs[R_CS].selector;
891b38e4
FB
800 /* XXX: use SS segment size ? */
801 PUSHW(ssp, esp, 0xffff, compute_eflags());
802 PUSHW(ssp, esp, 0xffff, old_cs);
803 PUSHW(ssp, esp, 0xffff, old_eip);
2c0262af
FB
804
805 /* update processor state */
806 ESP = (ESP & ~0xffff) | (esp & 0xffff);
807 env->eip = offset;
808 env->segs[R_CS].selector = selector;
809 env->segs[R_CS].base = (uint8_t *)(selector << 4);
810 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
811}
812
813/* fake user mode interrupt */
814void do_interrupt_user(int intno, int is_int, int error_code,
815 unsigned int next_eip)
816{
817 SegmentCache *dt;
818 uint8_t *ptr;
819 int dpl, cpl;
820 uint32_t e2;
821
822 dt = &env->idt;
823 ptr = dt->base + (intno * 8);
61382a50 824 e2 = ldl_kernel(ptr + 4);
2c0262af
FB
825
826 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
827 cpl = env->hflags & HF_CPL_MASK;
828 /* check privledge if software int */
829 if (is_int && dpl < cpl)
830 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
831
832 /* Since we emulate only user space, we cannot do more than
833 exiting the emulation with the suitable exception and error
834 code */
835 if (is_int)
836 EIP = next_eip;
837}
838
839/*
840 * Begin excution of an interruption. is_int is TRUE if coming from
841 * the int instruction. next_eip is the EIP value AFTER the interrupt
842 * instruction. It is only relevant if is_int is TRUE.
843 */
844void do_interrupt(int intno, int is_int, int error_code,
845 unsigned int next_eip, int is_hw)
846{
847 if (env->cr[0] & CR0_PE_MASK) {
848 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
849 } else {
850 do_interrupt_real(intno, is_int, error_code, next_eip);
851 }
852}
853
854/*
855 * Signal an interruption. It is executed in the main CPU loop.
856 * is_int is TRUE if coming from the int instruction. next_eip is the
857 * EIP value AFTER the interrupt instruction. It is only relevant if
858 * is_int is TRUE.
859 */
860void raise_interrupt(int intno, int is_int, int error_code,
861 unsigned int next_eip)
862{
863 env->exception_index = intno;
864 env->error_code = error_code;
865 env->exception_is_int = is_int;
866 env->exception_next_eip = next_eip;
867 cpu_loop_exit();
868}
869
/* shortcuts to generate exceptions */

/* Raise a CPU exception that carries an error code (does not return). */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
875
/* Raise a CPU exception without an error code (does not return). */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
880
#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */

/* Unsigned 64/32 divide: quotient via *q_ptr, remainder returned. */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

/* Signed 64/32 divide: quotient via *q_ptr, remainder returned. */
int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif
896
897void helper_divl_EAX_T0(uint32_t eip)
898{
899 unsigned int den, q, r;
900 uint64_t num;
901
902 num = EAX | ((uint64_t)EDX << 32);
903 den = T0;
904 if (den == 0) {
905 EIP = eip;
906 raise_exception(EXCP00_DIVZ);
907 }
908#ifdef BUGGY_GCC_DIV64
909 r = div64(&q, num, den);
910#else
911 q = (num / den);
912 r = (num % den);
913#endif
914 EAX = q;
915 EDX = r;
916}
917
918void helper_idivl_EAX_T0(uint32_t eip)
919{
920 int den, q, r;
921 int64_t num;
922
923 num = EAX | ((uint64_t)EDX << 32);
924 den = T0;
925 if (den == 0) {
926 EIP = eip;
927 raise_exception(EXCP00_DIVZ);
928 }
929#ifdef BUGGY_GCC_DIV64
930 r = idiv64(&q, num, den);
931#else
932 q = (num / den);
933 r = (num % den);
934#endif
935 EAX = q;
936 EDX = r;
937}
938
939void helper_cmpxchg8b(void)
940{
941 uint64_t d;
942 int eflags;
943
944 eflags = cc_table[CC_OP].compute_all();
945 d = ldq((uint8_t *)A0);
946 if (d == (((uint64_t)EDX << 32) | EAX)) {
947 stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
948 eflags |= CC_Z;
949 } else {
950 EDX = d >> 32;
951 EAX = d;
952 eflags &= ~CC_Z;
953 }
954 CC_SRC = eflags;
955}
956
/* We simulate a pre-MMX pentium as in valgrind */
/* CPUID leaf-1 EDX feature bits */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
/* ... */
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
978
979void helper_cpuid(void)
980{
981 if (EAX == 0) {
982 EAX = 1; /* max EAX index supported */
983 EBX = 0x756e6547;
984 ECX = 0x6c65746e;
985 EDX = 0x49656e69;
986 } else if (EAX == 1) {
987 int family, model, stepping;
988 /* EAX = 1 info */
989#if 0
990 /* pentium 75-200 */
991 family = 5;
992 model = 2;
993 stepping = 11;
994#else
995 /* pentium pro */
996 family = 6;
997 model = 1;
998 stepping = 3;
999#endif
1000 EAX = (family << 8) | (model << 4) | stepping;
1001 EBX = 0;
1002 ECX = 0;
1003 EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
1004 CPUID_TSC | CPUID_MSR | CPUID_MCE |
1005 CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
1006 }
1007}
1008
1009void helper_lldt_T0(void)
1010{
1011 int selector;
1012 SegmentCache *dt;
1013 uint32_t e1, e2;
1014 int index;
1015 uint8_t *ptr;
1016
1017 selector = T0 & 0xffff;
1018 if ((selector & 0xfffc) == 0) {
1019 /* XXX: NULL selector case: invalid LDT */
1020 env->ldt.base = NULL;
1021 env->ldt.limit = 0;
1022 } else {
1023 if (selector & 0x4)
1024 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1025 dt = &env->gdt;
1026 index = selector & ~7;
1027 if ((index + 7) > dt->limit)
1028 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1029 ptr = dt->base + index;
61382a50
FB
1030 e1 = ldl_kernel(ptr);
1031 e2 = ldl_kernel(ptr + 4);
2c0262af
FB
1032 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
1033 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1034 if (!(e2 & DESC_P_MASK))
1035 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1036 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1037 }
1038 env->ldt.selector = selector;
1039}
1040
1041void helper_ltr_T0(void)
1042{
1043 int selector;
1044 SegmentCache *dt;
1045 uint32_t e1, e2;
1046 int index, type;
1047 uint8_t *ptr;
1048
1049 selector = T0 & 0xffff;
1050 if ((selector & 0xfffc) == 0) {
1051 /* NULL selector case: invalid LDT */
1052 env->tr.base = NULL;
1053 env->tr.limit = 0;
1054 env->tr.flags = 0;
1055 } else {
1056 if (selector & 0x4)
1057 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1058 dt = &env->gdt;
1059 index = selector & ~7;
1060 if ((index + 7) > dt->limit)
1061 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1062 ptr = dt->base + index;
61382a50
FB
1063 e1 = ldl_kernel(ptr);
1064 e2 = ldl_kernel(ptr + 4);
2c0262af
FB
1065 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1066 if ((e2 & DESC_S_MASK) ||
7e84c249 1067 (type != 1 && type != 9))
2c0262af
FB
1068 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1069 if (!(e2 & DESC_P_MASK))
1070 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1071 load_seg_cache_raw_dt(&env->tr, e1, e2);
1072 e2 |= 0x00000200; /* set the busy bit */
61382a50 1073 stl_kernel(ptr + 4, e2);
2c0262af
FB
1074 }
1075 env->tr.selector = selector;
1076}
1077
3ab493de 1078/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2c0262af
FB
/* Load a data/stack segment register (protected mode, not VM86;
   seg_reg must not be R_CS).  cur_eip is written back to EIP before
   any fault so the faulting instruction can be restarted.
   Raises #GP, #SS (stack-not-present), or #NP as appropriate. */
void load_seg(int seg_reg, int selector, unsigned int cur_eip)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    if ((selector & 0xfffc) == 0) {
        /* null selector case: allowed for every register except SS */
        if (seg_reg == R_SS) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, 0);
        } else {
            cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
        }
    } else {
        /* TI bit selects LDT vs GDT */
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        /* must be a code/data (non-system) descriptor */
        if (!(e2 & DESC_S_MASK)) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_SS) {
            /* must be writable data segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
            /* SS additionally requires RPL == DPL == CPL */
            if (rpl != cpl || dpl != cpl) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            /* must be readable segment (execute-only code is not) */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test privilege rights */
                if (dpl < cpl || dpl < rpl) {
                    EIP = cur_eip;
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
                }
            }
        }

        /* presence is checked after access rights, as on real hardware */
        if (!(e2 & DESC_P_MASK)) {
            EIP = cur_eip;
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the accessed bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
1167
1168/* protected mode jump */
/* Protected-mode far JMP to T0:T1.  Handles direct code segments,
   TSS / task gates (via switch_tss) and call gates.  Raises #GP or
   #NP on privilege/validity violations; CPL never changes. */
void helper_ljmp_protected_T0_T1(void)
{
    int new_cs, new_eip, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        /* direct jump to a code segment */
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment: DPL <= CPL required */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment: RPL <= CPL and DPL == CPL */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* CPL is preserved: the loaded selector keeps the current CPL */
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump through a call gate or task gate / TSS */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            /* selector of the gate's target code segment */
            gate_cs = e1 >> 16;
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            /* a 286 gate only supplies a 16-bit offset */
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                           get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
1254
1255/* real mode call */
/* Real-mode far CALL to T0:T1.  'shift' selects 32-bit (1) vs 16-bit (0)
   operand size; 'next_eip' is the return address pushed on the stack.
   The PUSHL/PUSHW macros update 'esp' in place. */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    uint8_t *ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    /* only the masked part of ESP is replaced (16-bit stacks) */
    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
    env->eip = new_eip;
    /* real mode: segment base is simply selector << 4 */
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
}
1280
1281/* protected mode call */
1282void helper_lcall_protected_T0_T1(int shift, int next_eip)
1283{
891b38e4 1284 int new_cs, new_eip, new_stack, i;
2c0262af 1285 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
891b38e4
FB
1286 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
1287 uint32_t val, limit, old_sp_mask;
2c0262af
FB
1288 uint8_t *ssp, *old_ssp;
1289
1290 new_cs = T0;
1291 new_eip = T1;
f3f2d9be
FB
1292#ifdef DEBUG_PCALL
1293 if (loglevel) {
1294 fprintf(logfile, "lcall %04x:%08x\n",
1295 new_cs, new_eip);
1296 }
1297#endif
2c0262af
FB
1298 if ((new_cs & 0xfffc) == 0)
1299 raise_exception_err(EXCP0D_GPF, 0);
1300 if (load_segment(&e1, &e2, new_cs) != 0)
1301 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1302 cpl = env->hflags & HF_CPL_MASK;
f3f2d9be
FB
1303#ifdef DEBUG_PCALL
1304 if (loglevel) {
1305 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
1306 }
1307#endif
2c0262af
FB
1308 if (e2 & DESC_S_MASK) {
1309 if (!(e2 & DESC_CS_MASK))
1310 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1311 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
7e84c249 1312 if (e2 & DESC_C_MASK) {
2c0262af
FB
1313 /* conforming code segment */
1314 if (dpl > cpl)
1315 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1316 } else {
1317 /* non conforming code segment */
1318 rpl = new_cs & 3;
1319 if (rpl > cpl)
1320 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1321 if (dpl != cpl)
1322 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1323 }
1324 if (!(e2 & DESC_P_MASK))
1325 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
1326
1327 sp = ESP;
891b38e4
FB
1328 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1329 ssp = env->segs[R_SS].base;
2c0262af 1330 if (shift) {
891b38e4
FB
1331 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1332 PUSHL(ssp, sp, sp_mask, next_eip);
2c0262af 1333 } else {
891b38e4
FB
1334 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1335 PUSHW(ssp, sp, sp_mask, next_eip);
2c0262af 1336 }
2c0262af
FB
1337
1338 limit = get_seg_limit(e1, e2);
1339 if (new_eip > limit)
1340 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1341 /* from this point, not restartable */
891b38e4 1342 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2c0262af
FB
1343 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1344 get_seg_base(e1, e2), limit, e2);
1345 EIP = new_eip;
1346 } else {
1347 /* check gate type */
1348 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
7e84c249
FB
1349 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1350 rpl = new_cs & 3;
2c0262af
FB
1351 switch(type) {
1352 case 1: /* available 286 TSS */
1353 case 9: /* available 386 TSS */
1354 case 5: /* task gate */
7e84c249
FB
1355 if (dpl < cpl || dpl < rpl)
1356 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1357 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL);
2c0262af
FB
1358 break;
1359 case 4: /* 286 call gate */
1360 case 12: /* 386 call gate */
1361 break;
1362 default:
1363 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1364 break;
1365 }
1366 shift = type >> 3;
1367
2c0262af
FB
1368 if (dpl < cpl || dpl < rpl)
1369 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1370 /* check valid bit */
1371 if (!(e2 & DESC_P_MASK))
1372 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
1373 selector = e1 >> 16;
1374 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
f3f2d9be 1375 param_count = e2 & 0x1f;
2c0262af
FB
1376 if ((selector & 0xfffc) == 0)
1377 raise_exception_err(EXCP0D_GPF, 0);
1378
1379 if (load_segment(&e1, &e2, selector) != 0)
1380 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1381 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1382 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1383 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1384 if (dpl > cpl)
1385 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1386 if (!(e2 & DESC_P_MASK))
1387 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1388
1389 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1390 /* to inner priviledge */
1391 get_ss_esp_from_tss(&ss, &sp, dpl);
f3f2d9be
FB
1392#ifdef DEBUG_PCALL
1393 if (loglevel)
1394 fprintf(logfile, "ss=%04x sp=%04x param_count=%d ESP=%x\n",
1395 ss, sp, param_count, ESP);
1396#endif
2c0262af
FB
1397 if ((ss & 0xfffc) == 0)
1398 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1399 if ((ss & 3) != dpl)
1400 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1401 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
1402 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1403 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1404 if (ss_dpl != dpl)
1405 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1406 if (!(ss_e2 & DESC_S_MASK) ||
1407 (ss_e2 & DESC_CS_MASK) ||
1408 !(ss_e2 & DESC_W_MASK))
1409 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1410 if (!(ss_e2 & DESC_P_MASK))
1411 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1412
891b38e4 1413 // push_size = ((param_count * 2) + 8) << shift;
2c0262af 1414
891b38e4
FB
1415 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1416 old_ssp = env->segs[R_SS].base;
2c0262af 1417
891b38e4
FB
1418 sp_mask = get_sp_mask(ss_e2);
1419 ssp = get_seg_base(ss_e1, ss_e2);
2c0262af 1420 if (shift) {
891b38e4
FB
1421 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
1422 PUSHL(ssp, sp, sp_mask, ESP);
1423 for(i = param_count - 1; i >= 0; i--) {
1424 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
1425 PUSHL(ssp, sp, sp_mask, val);
2c0262af
FB
1426 }
1427 } else {
891b38e4
FB
1428 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
1429 PUSHW(ssp, sp, sp_mask, ESP);
1430 for(i = param_count - 1; i >= 0; i--) {
1431 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
1432 PUSHW(ssp, sp, sp_mask, val);
2c0262af
FB
1433 }
1434 }
891b38e4 1435 new_stack = 1;
2c0262af
FB
1436 } else {
1437 /* to same priviledge */
891b38e4
FB
1438 sp = ESP;
1439 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1440 ssp = env->segs[R_SS].base;
1441 // push_size = (4 << shift);
1442 new_stack = 0;
2c0262af
FB
1443 }
1444
1445 if (shift) {
891b38e4
FB
1446 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1447 PUSHL(ssp, sp, sp_mask, next_eip);
2c0262af 1448 } else {
891b38e4
FB
1449 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1450 PUSHW(ssp, sp, sp_mask, next_eip);
1451 }
1452
1453 /* from this point, not restartable */
1454
1455 if (new_stack) {
1456 ss = (ss & ~3) | dpl;
1457 cpu_x86_load_seg_cache(env, R_SS, ss,
1458 ssp,
1459 get_seg_limit(ss_e1, ss_e2),
1460 ss_e2);
2c0262af
FB
1461 }
1462
2c0262af
FB
1463 selector = (selector & ~3) | dpl;
1464 cpu_x86_load_seg_cache(env, R_CS, selector,
1465 get_seg_base(e1, e2),
1466 get_seg_limit(e1, e2),
1467 e2);
1468 cpu_x86_set_cpl(env, dpl);
891b38e4 1469 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2c0262af
FB
1470 EIP = offset;
1471 }
1472}
1473
7e84c249 1474/* real and vm86 mode iret */
2c0262af
FB
1475void helper_iret_real(int shift)
1476{
891b38e4 1477 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2c0262af
FB
1478 uint8_t *ssp;
1479 int eflags_mask;
7e84c249 1480
891b38e4
FB
1481 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
1482 sp = ESP;
1483 ssp = env->segs[R_SS].base;
2c0262af
FB
1484 if (shift == 1) {
1485 /* 32 bits */
891b38e4
FB
1486 POPL(ssp, sp, sp_mask, new_eip);
1487 POPL(ssp, sp, sp_mask, new_cs);
1488 new_cs &= 0xffff;
1489 POPL(ssp, sp, sp_mask, new_eflags);
2c0262af
FB
1490 } else {
1491 /* 16 bits */
891b38e4
FB
1492 POPW(ssp, sp, sp_mask, new_eip);
1493 POPW(ssp, sp, sp_mask, new_cs);
1494 POPW(ssp, sp, sp_mask, new_eflags);
2c0262af 1495 }
891b38e4 1496 ESP = (ESP & ~sp_mask) | (sp & 0xffff);
2c0262af
FB
1497 load_seg_vm(R_CS, new_cs);
1498 env->eip = new_eip;
7e84c249
FB
1499 if (env->eflags & VM_MASK)
1500 eflags_mask = FL_UPDATE_MASK32 | IF_MASK | RF_MASK;
1501 else
1502 eflags_mask = FL_UPDATE_CPL0_MASK;
2c0262af
FB
1503 if (shift == 0)
1504 eflags_mask &= 0xffff;
1505 load_eflags(new_eflags, eflags_mask);
1506}
1507
1508/* protected mode iret */
/* Common protected-mode RET/IRET tail.
   shift:   1 = 32-bit, 0 = 16-bit operand size
   is_iret: also pop EFLAGS (and possibly return to VM86)
   addend:  extra bytes to discard after the pop (RET imm16)
   Handles same-privilege return, outer-privilege return (pops the
   caller's SS:ESP), and the IRET-to-VM86 path. */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss, sp_mask;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask;
    uint8_t *ssp;

    sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel) {
        fprintf(logfile, "lret new %04x:%08x\n",
                new_cs, new_eip);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    /* a return may only go to the same or an outer privilege level */
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        /* conforming: DPL <= RPL */
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to outer (less privileged) level: pop caller SS:ESP */
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }

        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_load_seg_cache(env, R_SS, new_ss,
                       get_seg_base(ss_e1, ss_e2),
                       get_seg_limit(ss_e1, ss_e2),
                       ss_e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
        /* XXX: change sp_mask according to old segment ? */
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' can be different from the current CPL */
        if (cpl == 0)
            eflags_mask = FL_UPDATE_CPL0_MASK;
        else
            eflags_mask = FL_UPDATE_MASK32;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    /* IRET back to virtual-8086 mode: pop the full VM86 frame */
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip;
    ESP = new_esp;
}
1646
/* Protected-mode IRET.  If NT is set this is a task return: the link
   field of the current TSS names the task to resume.  Otherwise it is
   an ordinary inter/intra-privilege IRET via helper_ret_protected. */
void helper_iret_protected(int shift)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
        /* back-link selector is at offset 0 of the current TSS */
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS (type must be
           "busy TSS" = 3/11, masked to 3) */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
}
1668
/* Protected-mode far RET: like IRET but without the EFLAGS pop;
   'addend' discards the RET imm16 parameter bytes. */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
1673
1674void helper_movl_crN_T0(int reg)
1675{
1676 env->cr[reg] = T0;
1677 switch(reg) {
1678 case 0:
1679 cpu_x86_update_cr0(env);
1680 break;
1681 case 3:
1682 cpu_x86_update_cr3(env);
1683 break;
1684 }
1685}
1686
1687/* XXX: do more */
/* MOV DRn, T0: store the value; debug-register side effects
   (breakpoints) are not emulated here (see XXX above). */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}
1692
/* INVLPG: flush the TLB entry covering linear address 'addr'. */
void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}
1697
1698/* rdtsc */
1699#ifndef __i386__
1700uint64_t emu_time;
1701#endif
1702
/* RDTSC: return a 64-bit timestamp in EDX:EAX.  On an x86 host the
   real counter is used; elsewhere a monotonic software counter. */
void helper_rdtsc(void)
{
    uint64_t val;
#ifdef __i386__
    asm("rdtsc" : "=A" (val));
#else
    /* better than nothing: the time increases */
    val = emu_time++;
#endif
    EAX = val;
    EDX = val >> 32;
}
1715
1716void helper_wrmsr(void)
1717{
1718 switch(ECX) {
1719 case MSR_IA32_SYSENTER_CS:
1720 env->sysenter_cs = EAX & 0xffff;
1721 break;
1722 case MSR_IA32_SYSENTER_ESP:
1723 env->sysenter_esp = EAX;
1724 break;
1725 case MSR_IA32_SYSENTER_EIP:
1726 env->sysenter_eip = EAX;
1727 break;
1728 default:
1729 /* XXX: exception ? */
1730 break;
1731 }
1732}
1733
1734void helper_rdmsr(void)
1735{
1736 switch(ECX) {
1737 case MSR_IA32_SYSENTER_CS:
1738 EAX = env->sysenter_cs;
1739 EDX = 0;
1740 break;
1741 case MSR_IA32_SYSENTER_ESP:
1742 EAX = env->sysenter_esp;
1743 EDX = 0;
1744 break;
1745 case MSR_IA32_SYSENTER_EIP:
1746 EAX = env->sysenter_eip;
1747 EDX = 0;
1748 break;
1749 default:
1750 /* XXX: exception ? */
1751 break;
1752 }
1753}
1754
/* LSL: load the segment limit of the selector in T0 into T1 and set
   ZF on success.  On any access-rights or type failure the function
   simply returns with ZF cleared (no exception). */
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    /* start with ZF cleared; set it only on success */
    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming code: always readable for LSL */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        /* system descriptors: only TSS/LDT types have a usable limit */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC |= CC_Z;
}
1794
/* LAR: load the access-rights bytes of the selector in T0 into T1
   (masked to 0x00f0ff00) and set ZF on success; on failure return
   with ZF cleared (no exception). */
void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    /* null selector always fails */
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming code: no privilege check */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        /* LAR accepts more system types than LSL (incl. gates 4/5/12) */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC |= CC_Z;
}
1838
3ab493de
FB
/* VERR: set ZF if the segment selected by T0 is readable at the
   current privilege level; otherwise return with ZF cleared. */
void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    /* system segments are never readable via VERR */
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        /* code segment: must have the readable bit */
        if (!(e2 & DESC_R_MASK))
            return;
        /* non-conforming code also needs the privilege check */
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
    }
    CC_SRC |= CC_Z;
}
1869
1870void helper_verw(void)
1871{
1872 unsigned int selector;
1873 uint32_t e1, e2;
1874 int rpl, dpl, cpl;
1875
1876 CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1877 selector = T0 & 0xffff;
1878 if ((selector & 0xfffc) == 0)
1879 return;
1880 if (load_segment(&e1, &e2, selector) != 0)
1881 return;
1882 if (!(e2 & DESC_S_MASK))
1883 return;
1884 rpl = selector & 3;
1885 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1886 cpl = env->hflags & HF_CPL_MASK;
1887 if (e2 & DESC_CS_MASK) {
1888 return;
1889 } else {
1890 if (dpl < cpl || dpl < rpl)
1891 return;
1892 if (!(e2 & DESC_W_MASK))
1893 return;
1894 }
f3f2d9be 1895 CC_SRC |= CC_Z;
3ab493de
FB
1896}
1897
2c0262af
FB
1898/* FPU helpers */
1899
2c0262af
FB
/* FLD 80-bit: push the extended-precision value at A0 onto the FPU
   stack (decrements the stack top and validates the new entry's tag). */
void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}
1908
/* FSTP 80-bit: store ST0 as an extended-precision value at A0
   (the pop, if any, is handled by the caller). */
void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, (uint8_t *)A0);
}
2c0262af
FB
1913
1914/* BCD ops */
1915
1916#define MUL10(iv) ( iv + iv + (iv << 3) )
1917
/* FBLD: load an 80-bit packed BCD value from A0 and push it.
   Bytes 0..8 hold 18 BCD digits (two per byte, little endian);
   bit 7 of byte 9 is the sign. */
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    /* accumulate from the most significant byte down */
    for(i = 8; i >= 0; i--) {
        v = ldub((uint8_t *)A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub((uint8_t *)A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}
1936
/* FBSTP: store ST0 at A0 as an 80-bit packed BCD value
   (18 digits in bytes 0..8, sign in bit 7 of byte 9). */
void helper_fbst_ST0_A0(void)
{
    CPU86_LDouble tmp;
    int v;
    uint8_t *mem_ref, *mem_end;
    int64_t val;

    /* round to integer using the current rounding mode */
    tmp = rint(ST0);
    val = (int64_t)tmp;
    mem_ref = (uint8_t *)A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    /* emit two BCD digits per byte, least significant first */
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    /* zero-fill the remaining digit bytes */
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
1966
/* F2XM1: ST0 = 2^ST0 - 1 (defined for -1 <= ST0 <= 1). */
void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}
1971
1972void helper_fyl2x(void)
1973{
1974 CPU86_LDouble fptemp;
1975
1976 fptemp = ST0;
1977 if (fptemp>0.0){
1978 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
1979 ST1 *= fptemp;
1980 fpop();
1981 } else {
1982 env->fpus &= (~0x4700);
1983 env->fpus |= 0x400;
1984 }
1985}
1986
1987void helper_fptan(void)
1988{
1989 CPU86_LDouble fptemp;
1990
1991 fptemp = ST0;
1992 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
1993 env->fpus |= 0x400;
1994 } else {
1995 ST0 = tan(fptemp);
1996 fpush();
1997 ST0 = 1.0;
1998 env->fpus &= (~0x400); /* C2 <-- 0 */
1999 /* the above code is for |arg| < 2**52 only */
2000 }
2001}
2002
2003void helper_fpatan(void)
2004{
2005 CPU86_LDouble fptemp, fpsrcop;
2006
2007 fpsrcop = ST1;
2008 fptemp = ST0;
2009 ST1 = atan2(fpsrcop,fptemp);
2010 fpop();
2011}
2012
/* FXTRACT: split ST0 into exponent and significand.
   ST0 becomes the unbiased exponent; the significand (exponent forced
   to bias) is pushed on top. */
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}
2026
/* FPREM1: IEEE partial remainder ST0 = ST0 REM ST1 (round-to-nearest
   quotient).  If the exponent difference is too large, one reduction
   step is done and C2 is set so software loops until C2 clears. */
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        /* full reduction possible: quotient fits in a double */
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1: reduction incomplete */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
            floor(fpsrcop): ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
2059
/* FPREM: partial remainder with truncated (chop) quotient, the 8087-
   compatible variant of FPREM1.  C2 set means another iteration is
   needed; C0/C1/C3 receive the low quotient bits. */
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if ( expdif < 53 ) {
        /* full reduction possible */
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1: reduction incomplete */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0)?
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
2092
2093void helper_fyl2xp1(void)
2094{
2095 CPU86_LDouble fptemp;
2096
2097 fptemp = ST0;
2098 if ((fptemp+1.0)>0.0) {
2099 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
2100 ST1 *= fptemp;
2101 fpop();
2102 } else {
2103 env->fpus &= (~0x4700);
2104 env->fpus |= 0x400;
2105 }
2106}
2107
/* FSQRT: ST0 = sqrt(ST0).  A negative operand flags invalid-operation
   (C2 set) but the sqrt is still performed, yielding NaN. */
void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}
2119
2120void helper_fsincos(void)
2121{
2122 CPU86_LDouble fptemp;
2123
2124 fptemp = ST0;
2125 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
2126 env->fpus |= 0x400;
2127 } else {
2128 ST0 = sin(fptemp);
2129 fpush();
2130 ST0 = cos(fptemp);
2131 env->fpus &= (~0x400); /* C2 <-- 0 */
2132 /* the above code is for |arg| < 2**63 only */
2133 }
2134}
2135
/* FRNDINT: round ST0 to an integer.  On ARM hosts an explicit rounding
   instruction per FPU control-word mode is used; elsewhere rint()
   follows the host rounding mode (assumed synchronized with fpuc). */
void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
#ifdef __arm__
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
#else
    a = rint(a);
#endif
    ST0 = a;
}
2162
2163void helper_fscale(void)
2164{
2165 CPU86_LDouble fpsrcop, fptemp;
2166
2167 fpsrcop = 2.0;
2168 fptemp = pow(fpsrcop,ST1);
2169 ST0 *= fptemp;
2170}
2171
2172void helper_fsin(void)
2173{
2174 CPU86_LDouble fptemp;
2175
2176 fptemp = ST0;
2177 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
2178 env->fpus |= 0x400;
2179 } else {
2180 ST0 = sin(fptemp);
2181 env->fpus &= (~0x400); /* C2 <-- 0 */
2182 /* the above code is for |arg| < 2**53 only */
2183 }
2184}
2185
/* FCOS: ST0 = cos(ST0); out-of-range arguments only set C2. */
void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}
2199
/* FXAM: classify ST0 into the C3..C0 condition bits:
   C1 = sign; then NaN/infinity (max exponent), zero/denormal
   (zero exponent) or normal finite (C2). */
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        /* normal finite number */
        env->fpus |= 0x400;
    }
}
2226
/* FSTENV/FNSTENV: store the FPU environment (control word, status word
   with the stack top folded in, and the reconstructed tag word) at
   'ptr', in 32-bit (28-byte) or 16-bit (14-byte) layout. */
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    /* fold the 3-bit stack top into bits 11-13 of the status word */
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    /* rebuild the 2-bit-per-register tag word from register contents */
    for (i=7; i>=0; i--) {
	fptag <<= 2;
	if (env->fptags[i]) {
            fptag |= 3;  /* empty */
        } else {
            tmp.d = env->fpregs[i];
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
	        fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0);
        stl(ptr + 16, 0);
        stl(ptr + 20, 0);
        stl(ptr + 24, 0);
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
2276
2277void helper_fldenv(uint8_t *ptr, int data32)
2278{
2279 int i, fpus, fptag;
2280
2281 if (data32) {
2282 env->fpuc = lduw(ptr);
2283 fpus = lduw(ptr + 4);
2284 fptag = lduw(ptr + 8);
2285 }
2286 else {
2287 env->fpuc = lduw(ptr);
2288 fpus = lduw(ptr + 2);
2289 fptag = lduw(ptr + 4);
2290 }
2291 env->fpstt = (fpus >> 11) & 7;
2292 env->fpus = fpus & ~0x3800;
2293 for(i = 0;i < 7; i++) {
2294 env->fptags[i] = ((fptag & 3) == 3);
2295 fptag >>= 2;
2296 }
2297}
2298
2299void helper_fsave(uint8_t *ptr, int data32)
2300{
2301 CPU86_LDouble tmp;
2302 int i;
2303
2304 helper_fstenv(ptr, data32);
2305
2306 ptr += (14 << data32);
2307 for(i = 0;i < 8; i++) {
2308 tmp = ST(i);
2c0262af 2309 helper_fstt(tmp, ptr);
2c0262af
FB
2310 ptr += 10;
2311 }
2312
2313 /* fninit */
2314 env->fpus = 0;
2315 env->fpstt = 0;
2316 env->fpuc = 0x37f;
2317 env->fptags[0] = 1;
2318 env->fptags[1] = 1;
2319 env->fptags[2] = 1;
2320 env->fptags[3] = 1;
2321 env->fptags[4] = 1;
2322 env->fptags[5] = 1;
2323 env->fptags[6] = 1;
2324 env->fptags[7] = 1;
2325}
2326
2327void helper_frstor(uint8_t *ptr, int data32)
2328{
2329 CPU86_LDouble tmp;
2330 int i;
2331
2332 helper_fldenv(ptr, data32);
2333 ptr += (14 << data32);
2334
2335 for(i = 0;i < 8; i++) {
2c0262af 2336 tmp = helper_fldt(ptr);
2c0262af
FB
2337 ST(i) = tmp;
2338 ptr += 10;
2339 }
2340}
2341
#if !defined(CONFIG_USER_ONLY)
2343
2344#define MMUSUFFIX _mmu
2345#define GETPC() (__builtin_return_address(0))
2346
#define SHIFT 0
2348#include "softmmu_template.h"
2349
2350#define SHIFT 1
2351#include "softmmu_template.h"
2352
2353#define SHIFT 2
2354#include "softmmu_template.h"
2355
2356#define SHIFT 3
2357#include "softmmu_template.h"
2358
#endif
2360
2361/* try to fill the TLB and return an exception if error. If retaddr is
2362 NULL, it means that the function was called in C code (i.e. not
2363 from generated code or from helper.c) */
2364/* XXX: fix it to restore all registers */
2365void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
2c0262af
FB
2366{
2367 TranslationBlock *tb;
2368 int ret;
2369 unsigned long pc;
61382a50
FB
2370 CPUX86State *saved_env;
2371
2372 /* XXX: hack to restore env in all cases, even if not called from
2373 generated code */
2374 saved_env = env;
2375 env = cpu_single_env;
2376 if (is_write && page_unprotect(addr)) {
2377 /* nothing more to do: the page was write protected because
2378 there was code in it. page_unprotect() flushed the code. */
2379 }
2380
2381 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
2c0262af 2382 if (ret) {
61382a50
FB
2383 if (retaddr) {
2384 /* now we have a real cpu fault */
2385 pc = (unsigned long)retaddr;
2386 tb = tb_find_pc(pc);
2387 if (tb) {
2388 /* the PC is inside the translated code. It means that we have
2389 a virtual CPU fault */
2390 cpu_restore_state(tb, env, pc);
2391 }
2c0262af
FB
2392 }
2393 raise_exception_err(EXCP0E_PAGE, env->error_code);
2394 }
61382a50 2395 env = saved_env;
2c0262af 2396}