/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"

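/* the x86 parity flag (PF) reflects only the low 8 bits of a result:
   it is set when that byte contains an even number of 1 bits.  This
   table maps every possible byte value directly to CC_P or 0. */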
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table: a 16-bit RCL rotates through 17 bits (16 data bits
   plus CF), so the rotate count is reduced modulo 17 */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table: an 8-bit RCL rotates through 9 bits (8 data bits
   plus CF), so the rotate count is reduced modulo 9 */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

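/* x87 floating point constants: 0.0, 1.0, pi, log10(2), ln(2),
   log2(e) and log2(10), as used by the FLD<constant> instructions */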
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
#ifdef reg_EAX
    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_ECX
    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_EBX
    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ESP
    env->regs[R_ESP] = ESP;
#endif
#ifdef reg_EBP
    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESI
    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
    env->regs[R_EDI] = EDI;
#endif
    longjmp(env->jmp_env, 1);
}

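/* fetch the stack pointer and stack segment selector for privilege
   level 'dpl' from the current TSS; the entry layout depends on
   whether the TSS is a 16-bit or a 32-bit one */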
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw(env->tr.base + index);
        *ss_ptr = lduw(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl(env->tr.base + index);
        *ss_ptr = lduw(env->tr.base + index + 4);
    }
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl(ptr);
    *e2_ptr = ldl(ptr + 4);
    return 0;
}

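/* the descriptor limit is split across both words: bits 0-15 come
   from e1 and bits 16-19 from e2; when the granularity (G) bit is
   set, the limit is in 4K pages, so it is scaled up and the low 12
   bits are filled in */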
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
{
    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (uint8_t *)(selector << 4), 0xffff, 0);
}

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
    uint32_t old_cs, old_ss, old_esp, old_eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl(ptr);
    e2 = ldl(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        cpu_abort(env, "task gate not supported");
        break;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        new_stack = 0;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
    }

    shift = type >> 3;
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
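    /* bytes to push: EFLAGS, CS and EIP (3 words of 2 bytes each),
       plus SS and ESP on a stack switch, plus the error code if any;
       the final '<<= shift' doubles this for 32-bit gates, and an
       interrupt taken in vm86 mode additionally pushes GS, FS, DS
       and ES */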
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;

    /* XXX: check that enough room is available */
    if (new_stack) {
        old_esp = ESP;
        old_ss = env->segs[R_SS].selector;
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               get_seg_base(ss_e1, ss_e2),
                               get_seg_limit(ss_e1, ss_e2),
                               ss_e2);
    } else {
        old_esp = 0;
        old_ss = 0;
        esp = ESP;
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;
    ESP = esp - push_size;
    ssp = env->segs[R_SS].base + esp;
    if (shift == 1) {
        int old_eflags;
        if (env->eflags & VM_MASK) {
            ssp -= 4;
            stl(ssp, env->segs[R_GS].selector);
            ssp -= 4;
            stl(ssp, env->segs[R_FS].selector);
            ssp -= 4;
            stl(ssp, env->segs[R_DS].selector);
            ssp -= 4;
            stl(ssp, env->segs[R_ES].selector);
        }
        if (new_stack) {
            ssp -= 4;
            stl(ssp, old_ss);
            ssp -= 4;
            stl(ssp, old_esp);
        }
        ssp -= 4;
        old_eflags = compute_eflags();
        stl(ssp, old_eflags);
        ssp -= 4;
        stl(ssp, old_cs);
        ssp -= 4;
        stl(ssp, old_eip);
        if (has_error_code) {
            ssp -= 4;
            stl(ssp, error_code);
        }
    } else {
        if (new_stack) {
            ssp -= 2;
            stw(ssp, old_ss);
            ssp -= 2;
            stw(ssp, old_esp);
        }
        ssp -= 2;
        stw(ssp, compute_eflags());
        ssp -= 2;
        stw(ssp, old_cs);
        ssp -= 2;
        stw(ssp, old_eip);
        if (has_error_code) {
            ssp -= 2;
            stw(ssp, error_code);
        }
    }

    /* interrupt gates clear the IF flag; trap gates do not */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw(ptr);
    selector = lduw(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    esp -= 2;
    stw(ssp + (esp & 0xffff), compute_eflags());
    esp -= 2;
    stw(ssp + (esp & 0xffff), old_cs);
    esp -= 2;
    stw(ssp + (esp & 0xffff), old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  unsigned int next_eip, int is_hw)
{
    if (env->cr[0] & CR0_PE_MASK) {
        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     unsigned int next_eip)
{
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

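/* 32-bit DIV/IDIV: divide the 64-bit value EDX:EAX by T0, leaving the
   quotient in EAX and the remainder in EDX.  Division by zero raises
   #DE; note that the quotient-overflow case (a quotient that does not
   fit in 32 bits, which also raises #DE on real hardware) is not
   checked here. */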
void helper_divl_EAX_T0(uint32_t eip)
{
    unsigned int den, q, r;
    uint64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}

void helper_idivl_EAX_T0(uint32_t eip)
{
    int den, q, r;
    int64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}

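/* CMPXCHG8B: compare the 64-bit value at [A0] with EDX:EAX; if they
   are equal, store ECX:EBX there and set ZF, otherwise load the
   memory value into EDX:EAX and clear ZF */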
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq((uint8_t *)A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

/* We simulate a pre-MMX pentium as in valgrind */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
/* ... */
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)

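/* CPUID leaf 0 returns the maximum supported leaf in EAX and the
   "GenuineIntel" vendor string split across EBX, EDX and ECX;
   leaf 1 returns the family/model/stepping signature in EAX and the
   feature flag word in EDX */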
void helper_cpuid(void)
{
    if (EAX == 0) {
        EAX = 1; /* max EAX index supported */
        EBX = 0x756e6547;
        ECX = 0x6c65746e;
        EDX = 0x49656e69;
    } else if (EAX == 1) {
        int family, model, stepping;
        /* EAX = 1 info */
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 1;
        stepping = 3;
#endif
        EAX = (family << 8) | (model << 4) | stepping;
        EBX = 0;
        ECX = 0;
        EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
            CPUID_TSC | CPUID_MSR | CPUID_MCE |
            CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
    }
}

void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl(ptr);
        e2 = ldl(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
    env->ldt.selector = selector;
}

void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = NULL;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl(ptr);
        e2 = ldl(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        /* only available 286 (type 1) and 386 (type 9) TSS descriptors
           may be loaded into TR */
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->tr, e1, e2);
        e2 |= 0x00000200; /* set the busy bit */
        stl(ptr + 4, e2);
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. Calling load_seg with
   seg_reg == R_CS is discouraged */
void load_seg(int seg_reg, int selector, unsigned int cur_eip)
{
    uint32_t e1, e2;

    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, 0);
        } else {
            cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
        }
    } else {
        if (load_segment(&e1, &e2, selector) != 0) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK) ||
            (e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }

        if (seg_reg == R_SS) {
            if ((e2 & (DESC_CS_MASK | DESC_W_MASK)) == 0) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            EIP = cur_eip;
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected_T0_T1(void)
{
    int new_cs, new_eip;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        cpu_abort(env, "jmp to call/task gate not supported 0x%04x:0x%08x",
                  new_cs, new_eip);
    }
}

/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    uint8_t *ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = 0xffffffff;
    if (!(env->segs[R_SS].flags & DESC_B_MASK))
        esp_mask = 0xffff;
    ssp = env->segs[R_SS].base;
    if (shift) {
        esp -= 4;
        stl(ssp + (esp & esp_mask), env->segs[R_CS].selector);
        esp -= 4;
        stl(ssp + (esp & esp_mask), next_eip);
    } else {
        esp -= 2;
        stw(ssp + (esp & esp_mask), env->segs[R_CS].selector);
        esp -= 2;
        stw(ssp + (esp & esp_mask), next_eip);
    }

    if (!(env->segs[R_SS].flags & DESC_B_MASK))
        ESP = (ESP & ~0xffff) | (esp & 0xffff);
    else
        ESP = esp;
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, push_size, sp, type, ss_dpl;
    uint32_t old_ss, old_esp, val, i, limit;
    uint8_t *ssp, *old_ssp;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

        sp = ESP;
        if (!(env->segs[R_SS].flags & DESC_B_MASK))
            sp &= 0xffff;
        ssp = env->segs[R_SS].base + sp;
        if (shift) {
            ssp -= 4;
            stl(ssp, env->segs[R_CS].selector);
            ssp -= 4;
            stl(ssp, next_eip);
        } else {
            ssp -= 2;
            stw(ssp, env->segs[R_CS].selector);
            ssp -= 2;
            stw(ssp, next_eip);
        }
        sp -= (4 << shift);

        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* from this point, not restartable */
        if (!(env->segs[R_SS].flags & DESC_B_MASK))
            ESP = (ESP & 0xffff0000) | (sp & 0xffff);
        else
            ESP = sp;
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            cpu_abort(env, "task gate not supported");
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            param_count = e2 & 0x1f;
            push_size = ((param_count * 2) + 8) << shift;

            old_esp = ESP;
            old_ss = env->segs[R_SS].selector;
            if (!(env->segs[R_SS].flags & DESC_B_MASK))
                old_esp &= 0xffff;
            old_ssp = env->segs[R_SS].base + old_esp;

            /* XXX: from this point not restartable */
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);

            if (!(env->segs[R_SS].flags & DESC_B_MASK))
                sp &= 0xffff;
            ssp = env->segs[R_SS].base + sp;
            if (shift) {
                ssp -= 4;
                stl(ssp, old_ss);
                ssp -= 4;
                stl(ssp, old_esp);
                ssp -= 4 * param_count;
                for(i = 0; i < param_count; i++) {
                    val = ldl(old_ssp + i * 4);
                    stl(ssp + i * 4, val);
                }
            } else {
                ssp -= 2;
                stw(ssp, old_ss);
                ssp -= 2;
                stw(ssp, old_esp);
                ssp -= 2 * param_count;
                for(i = 0; i < param_count; i++) {
                    val = lduw(old_ssp + i * 2);
                    stw(ssp + i * 2, val);
                }
            }
        } else {
            /* to same privilege */
            if (!(env->segs[R_SS].flags & DESC_B_MASK))
                sp &= 0xffff;
            ssp = env->segs[R_SS].base + sp;
            push_size = (4 << shift);
        }

        if (shift) {
            ssp -= 4;
            stl(ssp, env->segs[R_CS].selector);
            ssp -= 4;
            stl(ssp, next_eip);
        } else {
            ssp -= 2;
            stw(ssp, env->segs[R_CS].selector);
            ssp -= 2;
            stw(ssp, next_eip);
        }

        sp -= push_size;
        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, dpl);

        /* from this point, not restartable if same privilege */
        if (!(env->segs[R_SS].flags & DESC_B_MASK))
            ESP = (ESP & 0xffff0000) | (sp & 0xffff);
        else
            ESP = sp;
        EIP = offset;
    }
}

/* real mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp;
    uint8_t *ssp;
    int eflags_mask;

    sp = ESP & 0xffff;
    ssp = env->segs[R_SS].base + sp;
    if (shift == 1) {
        /* 32 bits */
        new_eflags = ldl(ssp + 8);
        new_cs = ldl(ssp + 4) & 0xffff;
        new_eip = ldl(ssp) & 0xffff;
    } else {
        /* 16 bits */
        new_eflags = lduw(ssp + 4);
        new_cs = lduw(ssp + 2);
        new_eip = lduw(ssp);
    }
    new_esp = sp + (6 << shift);
    ESP = (ESP & 0xffff0000) |
        (new_esp & 0xffff);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    eflags_mask = FL_UPDATE_CPL0_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

/* protected mode iret/lret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask;
    uint8_t *ssp;

    sp = ESP;
    if (!(env->segs[R_SS].flags & DESC_B_MASK))
        sp &= 0xffff;
    ssp = env->segs[R_SS].base + sp;
    if (shift == 1) {
        /* 32 bits */
        if (is_iret)
            new_eflags = ldl(ssp + 8);
        new_cs = ldl(ssp + 4) & 0xffff;
        new_eip = ldl(ssp);
        if (is_iret && (new_eflags & VM_MASK))
            goto return_to_vm86;
    } else {
        /* 16 bits */
        if (is_iret)
            new_eflags = lduw(ssp + 4);
        new_cs = lduw(ssp + 2);
        new_eip = lduw(ssp);
    }
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    if (rpl == cpl) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        new_esp = sp + (4 << shift) + ((2 * is_iret) << shift) + addend;
    } else {
        /* return to different privilege level */
        ssp += (4 << shift) + ((2 * is_iret) << shift) + addend;
        if (shift == 1) {
            /* 32 bits */
            new_esp = ldl(ssp);
            new_ss = ldl(ssp + 4) & 0xffff;
        } else {
            /* 16 bits */
            new_esp = lduw(ssp);
            new_ss = lduw(ssp + 2);
        }

        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_load_seg_cache(env, R_SS, new_ss,
                               get_seg_base(ss_e1, ss_e2),
                               get_seg_limit(ss_e1, ss_e2),
                               ss_e2);
        cpu_x86_set_cpl(env, rpl);
    }
    if (env->segs[R_SS].flags & DESC_B_MASK)
        ESP = new_esp;
    else
        ESP = (ESP & 0xffff0000) |
            (new_esp & 0xffff);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' can be different from the current CPL */
        if (cpl == 0)
            eflags_mask = FL_UPDATE_CPL0_MASK;
        else
            eflags_mask = FL_UPDATE_MASK32;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    new_esp = ldl(ssp + 12);
    new_ss = ldl(ssp + 16);
    new_es = ldl(ssp + 20);
    new_ds = ldl(ssp + 24);
    new_fs = ldl(ssp + 28);
    new_gs = ldl(ssp + 32);

    /* modify processor state */
    load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss);
    load_seg_vm(R_ES, new_es);
    load_seg_vm(R_DS, new_ds);
    load_seg_vm(R_FS, new_fs);
    load_seg_vm(R_GS, new_gs);

    env->eip = new_eip;
    ESP = new_esp;
}

void helper_iret_protected(int shift)
{
    helper_ret_protected(shift, 1, 0);
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}

void helper_movl_crN_T0(int reg)
{
    env->cr[reg] = T0;
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env);
        break;
    case 3:
        cpu_x86_update_cr3(env);
        break;
    }
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}

/* rdtsc */
#ifndef __i386__
uint64_t emu_time;
#endif

void helper_rdtsc(void)
{
    uint64_t val;
#ifdef __i386__
    asm("rdtsc" : "=A" (val));
#else
    /* better than nothing: the time increases */
    val = emu_time++;
#endif
    EAX = val;
    EDX = val >> 32;
}

void helper_wrmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = EAX & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = EAX;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = EAX;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        EAX = env->sysenter_cs;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_ESP:
        EAX = env->sysenter_esp;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_EIP:
        EAX = env->sysenter_eip;
        EDX = 0;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & (1 << 23))
        limit = (limit << 12) | 0xfff;
    T1 = limit;
    CC_SRC |= CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    T1 = e2 & 0x00f0ff00;
    CC_SRC |= CC_Z;
}

/* FPU helpers */

#ifndef USE_X86LDOUBLE
void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, (uint8_t *)A0);
}
#endif

/* BCD ops */

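/* multiply by 10 without a multiply instruction:
   iv + iv + (iv << 3) = 2*iv + 8*iv = 10*iv */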
#define MUL10(iv) ( iv + iv + (iv << 3) )

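/* FBLD: load an 18-digit packed BCD integer (9 bytes of two digits
   each, followed by a sign byte) from [A0] and push it on the FPU
   stack */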
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub((uint8_t *)A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub((uint8_t *)A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    CPU86_LDouble tmp;
    int v;
    uint8_t *mem_ref, *mem_end;
    int64_t val;

    tmp = rint(ST0);
    val = (int64_t)tmp;
    mem_ref = (uint8_t *)A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0); /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

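/* FXTRACT: split ST0 into its unbiased exponent (left in ST0) and
   its significand rescaled to the exponent bias (pushed on top of
   the stack) */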
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
            floor(fpsrcop): ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0)?
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
#ifdef __arm__
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
#else
    a = rint(a);
#endif
    ST0 = a;
}

void helper_fscale(void)
{
    CPU86_LDouble fpsrcop, fptemp;

    fpsrcop = 2.0;
    fptemp = pow(fpsrcop, ST1);
    ST0 *= fptemp;
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

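/* store the FPU environment at 'ptr' (16- or 32-bit layout). The tag
   word encodes each register in two bits: 00 valid, 01 zero,
   10 special (NaN, infinity, denormal), 11 empty */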
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i];
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0);
        stl(ptr + 16, 0);
        stl(ptr + 20, 0);
        stl(ptr + 24, 0);
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(uint8_t *ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    /* restore all 8 tag entries */
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0; i < 8; i++) {
        tmp = ST(i);
#ifdef USE_X86LDOUBLE
        *(long double *)ptr = tmp;
#else
        helper_fstt(tmp, ptr);
#endif
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0; i < 8; i++) {
#ifdef USE_X86LDOUBLE
        tmp = *(long double *)ptr;
#else
        tmp = helper_fldt(ptr);
#endif
        ST(i) = tmp;
        ptr += 10;
    }
}

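/* instantiate the softmmu memory access helpers for 1-, 2-, 4- and
   8-byte accesses; SHIFT is log2 of the access size */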
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* try to fill the TLB and return an exception if error */
void tlb_fill(unsigned long addr, int is_write, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    ret = cpu_x86_handle_mmu_fault(env, addr, is_write);
    if (ret) {
        /* now we have a real cpu fault */
        pc = (unsigned long)retaddr;
        tb = tb_find_pc(pc);
        if (tb) {
            /* the PC is inside the translated code. It means that we have
               a virtual CPU fault */
            cpu_restore_state(tb, env, pc);
        }
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    }
}