]> git.proxmox.com Git - qemu.git/blame - target-i386/helper.c
initial x86-64 host support (Gwenole Beauchesne)
[qemu.git] / target-i386 / helper.c
CommitLineData
2c0262af
FB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "exec.h"
21
f3f2d9be
FB
22//#define DEBUG_PCALL
23
8145122b
FB
24#if 0
25#define raise_exception_err(a, b)\
26do {\
27 printf("raise_exception line=%d\n", __LINE__);\
28 (raise_exception_err)(a, b);\
29} while (0)
30#endif
31
2c0262af
FB
32const uint8_t parity_table[256] = {
33 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
34 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
35 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
36 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
37 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
38 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
39 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
40 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
41 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
42 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
43 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
44 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
45 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
46 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
47 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
48 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
49 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
51 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
52 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
58 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
60 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65};
66
67/* modulo 17 table */
68const uint8_t rclw_table[32] = {
69 0, 1, 2, 3, 4, 5, 6, 7,
70 8, 9,10,11,12,13,14,15,
71 16, 0, 1, 2, 3, 4, 5, 6,
72 7, 8, 9,10,11,12,13,14,
73};
74
75/* modulo 9 table */
76const uint8_t rclb_table[32] = {
77 0, 1, 2, 3, 4, 5, 6, 7,
78 8, 0, 1, 2, 3, 4, 5, 6,
79 7, 8, 0, 1, 2, 3, 4, 5,
80 6, 7, 8, 0, 1, 2, 3, 4,
81};
82
83const CPU86_LDouble f15rk[7] =
84{
85 0.00000000000000000000L,
86 1.00000000000000000000L,
87 3.14159265358979323851L, /*pi*/
88 0.30102999566398119523L, /*lg2*/
89 0.69314718055994530943L, /*ln2*/
90 1.44269504088896340739L, /*l2e*/
91 3.32192809488736234781L, /*l2t*/
92};
93
94/* thread support */
95
96spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
97
98void cpu_lock(void)
99{
100 spin_lock(&global_cpu_lock);
101}
102
103void cpu_unlock(void)
104{
105 spin_unlock(&global_cpu_lock);
106}
107
108void cpu_loop_exit(void)
109{
110 /* NOTE: the register at this point must be saved by hand because
111 longjmp restore them */
112#ifdef reg_EAX
113 env->regs[R_EAX] = EAX;
114#endif
115#ifdef reg_ECX
116 env->regs[R_ECX] = ECX;
117#endif
118#ifdef reg_EDX
119 env->regs[R_EDX] = EDX;
120#endif
121#ifdef reg_EBX
122 env->regs[R_EBX] = EBX;
123#endif
124#ifdef reg_ESP
125 env->regs[R_ESP] = ESP;
126#endif
127#ifdef reg_EBP
128 env->regs[R_EBP] = EBP;
129#endif
130#ifdef reg_ESI
131 env->regs[R_ESI] = ESI;
132#endif
133#ifdef reg_EDI
134 env->regs[R_EDI] = EDI;
135#endif
136 longjmp(env->jmp_env, 1);
137}
138
7e84c249
FB
139/* return non zero if error */
140static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
141 int selector)
142{
143 SegmentCache *dt;
144 int index;
145 uint8_t *ptr;
146
147 if (selector & 0x4)
148 dt = &env->ldt;
149 else
150 dt = &env->gdt;
151 index = selector & ~7;
152 if ((index + 7) > dt->limit)
153 return -1;
154 ptr = dt->base + index;
155 *e1_ptr = ldl_kernel(ptr);
156 *e2_ptr = ldl_kernel(ptr + 4);
157 return 0;
158}
159
160static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
161{
162 unsigned int limit;
163 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
164 if (e2 & DESC_G_MASK)
165 limit = (limit << 12) | 0xfff;
166 return limit;
167}
168
169static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
170{
171 return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
172}
173
174static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
175{
176 sc->base = get_seg_base(e1, e2);
177 sc->limit = get_seg_limit(e1, e2);
178 sc->flags = e2;
179}
180
181/* init the segment cache in vm86 mode. */
182static inline void load_seg_vm(int seg, int selector)
183{
184 selector &= 0xffff;
185 cpu_x86_load_seg_cache(env, seg, selector,
186 (uint8_t *)(selector << 4), 0xffff, 0);
187}
188
2c0262af
FB
189static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
190 uint32_t *esp_ptr, int dpl)
191{
192 int type, index, shift;
193
194#if 0
195 {
196 int i;
197 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
198 for(i=0;i<env->tr.limit;i++) {
199 printf("%02x ", env->tr.base[i]);
200 if ((i & 7) == 7) printf("\n");
201 }
202 printf("\n");
203 }
204#endif
205
206 if (!(env->tr.flags & DESC_P_MASK))
207 cpu_abort(env, "invalid tss");
208 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
209 if ((type & 7) != 1)
210 cpu_abort(env, "invalid tss type");
211 shift = type >> 3;
212 index = (dpl * 4 + 2) << shift;
213 if (index + (4 << shift) - 1 > env->tr.limit)
214 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
215 if (shift == 0) {
61382a50
FB
216 *esp_ptr = lduw_kernel(env->tr.base + index);
217 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
2c0262af 218 } else {
61382a50
FB
219 *esp_ptr = ldl_kernel(env->tr.base + index);
220 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
2c0262af
FB
221 }
222}
223
7e84c249
FB
224/* XXX: merge with load_seg() */
225static void tss_load_seg(int seg_reg, int selector)
226{
227 uint32_t e1, e2;
228 int rpl, dpl, cpl;
229
230 if ((selector & 0xfffc) != 0) {
231 if (load_segment(&e1, &e2, selector) != 0)
232 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
233 if (!(e2 & DESC_S_MASK))
234 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
235 rpl = selector & 3;
236 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
237 cpl = env->hflags & HF_CPL_MASK;
238 if (seg_reg == R_CS) {
239 if (!(e2 & DESC_CS_MASK))
240 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
241 if (dpl != rpl)
242 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
243 if ((e2 & DESC_C_MASK) && dpl > rpl)
244 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
245
246 } else if (seg_reg == R_SS) {
247 /* SS must be writable data */
248 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
249 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
250 if (dpl != cpl || dpl != rpl)
251 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
252 } else {
253 /* not readable code */
254 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
255 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
256 /* if data or non conforming code, checks the rights */
257 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
258 if (dpl < cpl || dpl < rpl)
259 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
260 }
261 }
262 if (!(e2 & DESC_P_MASK))
263 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
264 cpu_x86_load_seg_cache(env, seg_reg, selector,
265 get_seg_base(e1, e2),
266 get_seg_limit(e1, e2),
267 e2);
268 } else {
269 if (seg_reg == R_SS || seg_reg == R_CS)
270 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
271 }
272}
273
274#define SWITCH_TSS_JMP 0
275#define SWITCH_TSS_IRET 1
276#define SWITCH_TSS_CALL 2
277
278/* XXX: restore CPU state in registers (PowerPC case) */
279static void switch_tss(int tss_selector,
280 uint32_t e1, uint32_t e2, int source)
2c0262af 281{
7e84c249
FB
282 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
283 uint8_t *tss_base;
284 uint32_t new_regs[8], new_segs[6];
285 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
286 uint32_t old_eflags, eflags_mask;
2c0262af
FB
287 SegmentCache *dt;
288 int index;
289 uint8_t *ptr;
290
7e84c249 291 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
dc6f57fd
FB
292#ifdef DEBUG_PCALL
293 if (loglevel)
294 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
295#endif
7e84c249
FB
296
297 /* if task gate, we read the TSS segment and we load it */
298 if (type == 5) {
299 if (!(e2 & DESC_P_MASK))
300 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
301 tss_selector = e1 >> 16;
302 if (tss_selector & 4)
303 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
304 if (load_segment(&e1, &e2, tss_selector) != 0)
305 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
306 if (e2 & DESC_S_MASK)
307 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
308 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
309 if ((type & 7) != 1)
310 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
311 }
312
313 if (!(e2 & DESC_P_MASK))
314 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
315
316 if (type & 8)
317 tss_limit_max = 103;
2c0262af 318 else
7e84c249
FB
319 tss_limit_max = 43;
320 tss_limit = get_seg_limit(e1, e2);
321 tss_base = get_seg_base(e1, e2);
322 if ((tss_selector & 4) != 0 ||
323 tss_limit < tss_limit_max)
324 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
325 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
326 if (old_type & 8)
327 old_tss_limit_max = 103;
328 else
329 old_tss_limit_max = 43;
330
331 /* read all the registers from the new TSS */
332 if (type & 8) {
333 /* 32 bit */
334 new_cr3 = ldl_kernel(tss_base + 0x1c);
335 new_eip = ldl_kernel(tss_base + 0x20);
336 new_eflags = ldl_kernel(tss_base + 0x24);
337 for(i = 0; i < 8; i++)
338 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
339 for(i = 0; i < 6; i++)
340 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
341 new_ldt = lduw_kernel(tss_base + 0x60);
342 new_trap = ldl_kernel(tss_base + 0x64);
343 } else {
344 /* 16 bit */
345 new_cr3 = 0;
346 new_eip = lduw_kernel(tss_base + 0x0e);
347 new_eflags = lduw_kernel(tss_base + 0x10);
348 for(i = 0; i < 8; i++)
349 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
350 for(i = 0; i < 4; i++)
351 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
352 new_ldt = lduw_kernel(tss_base + 0x2a);
353 new_segs[R_FS] = 0;
354 new_segs[R_GS] = 0;
355 new_trap = 0;
356 }
357
358 /* NOTE: we must avoid memory exceptions during the task switch,
359 so we make dummy accesses before */
360 /* XXX: it can still fail in some cases, so a bigger hack is
361 necessary to valid the TLB after having done the accesses */
362
363 v1 = ldub_kernel(env->tr.base);
364 v2 = ldub(env->tr.base + old_tss_limit_max);
365 stb_kernel(env->tr.base, v1);
366 stb_kernel(env->tr.base + old_tss_limit_max, v2);
367
368 /* clear busy bit (it is restartable) */
369 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
370 uint8_t *ptr;
371 uint32_t e2;
372 ptr = env->gdt.base + (env->tr.selector << 3);
373 e2 = ldl_kernel(ptr + 4);
374 e2 &= ~DESC_TSS_BUSY_MASK;
375 stl_kernel(ptr + 4, e2);
376 }
377 old_eflags = compute_eflags();
378 if (source == SWITCH_TSS_IRET)
379 old_eflags &= ~NT_MASK;
380
381 /* save the current state in the old TSS */
382 if (type & 8) {
383 /* 32 bit */
384 stl_kernel(env->tr.base + 0x20, env->eip);
385 stl_kernel(env->tr.base + 0x24, old_eflags);
386 for(i = 0; i < 8; i++)
387 stl_kernel(env->tr.base + (0x28 + i * 4), env->regs[i]);
388 for(i = 0; i < 6; i++)
389 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
390 } else {
391 /* 16 bit */
392 stw_kernel(env->tr.base + 0x0e, new_eip);
393 stw_kernel(env->tr.base + 0x10, old_eflags);
394 for(i = 0; i < 8; i++)
395 stw_kernel(env->tr.base + (0x12 + i * 2), env->regs[i]);
396 for(i = 0; i < 4; i++)
397 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
398 }
399
400 /* now if an exception occurs, it will occurs in the next task
401 context */
402
403 if (source == SWITCH_TSS_CALL) {
404 stw_kernel(tss_base, env->tr.selector);
405 new_eflags |= NT_MASK;
406 }
407
408 /* set busy bit */
409 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
410 uint8_t *ptr;
411 uint32_t e2;
412 ptr = env->gdt.base + (tss_selector << 3);
413 e2 = ldl_kernel(ptr + 4);
414 e2 |= DESC_TSS_BUSY_MASK;
415 stl_kernel(ptr + 4, e2);
416 }
417
418 /* set the new CPU state */
419 /* from this point, any exception which occurs can give problems */
420 env->cr[0] |= CR0_TS_MASK;
421 env->tr.selector = tss_selector;
422 env->tr.base = tss_base;
423 env->tr.limit = tss_limit;
424 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
425
426 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
1ac157da 427 cpu_x86_update_cr3(env, new_cr3);
7e84c249
FB
428 }
429
430 /* load all registers without an exception, then reload them with
431 possible exception */
432 env->eip = new_eip;
4136f33c 433 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
8145122b 434 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
7e84c249
FB
435 if (!(type & 8))
436 eflags_mask &= 0xffff;
437 load_eflags(new_eflags, eflags_mask);
438 for(i = 0; i < 8; i++)
439 env->regs[i] = new_regs[i];
440 if (new_eflags & VM_MASK) {
441 for(i = 0; i < 6; i++)
442 load_seg_vm(i, new_segs[i]);
443 /* in vm86, CPL is always 3 */
444 cpu_x86_set_cpl(env, 3);
445 } else {
446 /* CPL is set the RPL of CS */
447 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
448 /* first just selectors as the rest may trigger exceptions */
449 for(i = 0; i < 6; i++)
450 cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0);
451 }
452
453 env->ldt.selector = new_ldt & ~4;
454 env->ldt.base = NULL;
455 env->ldt.limit = 0;
456 env->ldt.flags = 0;
457
458 /* load the LDT */
459 if (new_ldt & 4)
460 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
461
8145122b
FB
462 if ((new_ldt & 0xfffc) != 0) {
463 dt = &env->gdt;
464 index = new_ldt & ~7;
465 if ((index + 7) > dt->limit)
466 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
467 ptr = dt->base + index;
468 e1 = ldl_kernel(ptr);
469 e2 = ldl_kernel(ptr + 4);
470 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
471 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
472 if (!(e2 & DESC_P_MASK))
473 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
474 load_seg_cache_raw_dt(&env->ldt, e1, e2);
475 }
7e84c249
FB
476
477 /* load the segments */
478 if (!(new_eflags & VM_MASK)) {
479 tss_load_seg(R_CS, new_segs[R_CS]);
480 tss_load_seg(R_SS, new_segs[R_SS]);
481 tss_load_seg(R_ES, new_segs[R_ES]);
482 tss_load_seg(R_DS, new_segs[R_DS]);
483 tss_load_seg(R_FS, new_segs[R_FS]);
484 tss_load_seg(R_GS, new_segs[R_GS]);
485 }
486
487 /* check that EIP is in the CS segment limits */
488 if (new_eip > env->segs[R_CS].limit) {
489 raise_exception_err(EXCP0D_GPF, 0);
490 }
2c0262af 491}
7e84c249
FB
492
493/* check if Port I/O is allowed in TSS */
494static inline void check_io(int addr, int size)
2c0262af 495{
7e84c249
FB
496 int io_offset, val, mask;
497
498 /* TSS must be a valid 32 bit one */
499 if (!(env->tr.flags & DESC_P_MASK) ||
500 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
501 env->tr.limit < 103)
502 goto fail;
503 io_offset = lduw_kernel(env->tr.base + 0x66);
504 io_offset += (addr >> 3);
505 /* Note: the check needs two bytes */
506 if ((io_offset + 1) > env->tr.limit)
507 goto fail;
508 val = lduw_kernel(env->tr.base + io_offset);
509 val >>= (addr & 7);
510 mask = (1 << size) - 1;
511 /* all bits must be zero to allow the I/O */
512 if ((val & mask) != 0) {
513 fail:
514 raise_exception_err(EXCP0D_GPF, 0);
515 }
2c0262af
FB
516}
517
7e84c249 518void check_iob_T0(void)
2c0262af 519{
7e84c249 520 check_io(T0, 1);
2c0262af
FB
521}
522
7e84c249 523void check_iow_T0(void)
2c0262af 524{
7e84c249 525 check_io(T0, 2);
2c0262af
FB
526}
527
7e84c249 528void check_iol_T0(void)
2c0262af 529{
7e84c249
FB
530 check_io(T0, 4);
531}
532
533void check_iob_DX(void)
534{
535 check_io(EDX & 0xffff, 1);
536}
537
538void check_iow_DX(void)
539{
540 check_io(EDX & 0xffff, 2);
541}
542
543void check_iol_DX(void)
544{
545 check_io(EDX & 0xffff, 4);
2c0262af
FB
546}
547
891b38e4
FB
548static inline unsigned int get_sp_mask(unsigned int e2)
549{
550 if (e2 & DESC_B_MASK)
551 return 0xffffffff;
552 else
553 return 0xffff;
554}
555
556/* XXX: add a is_user flag to have proper security support */
557#define PUSHW(ssp, sp, sp_mask, val)\
558{\
559 sp -= 2;\
560 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
561}
562
563#define PUSHL(ssp, sp, sp_mask, val)\
564{\
565 sp -= 4;\
566 stl_kernel((ssp) + (sp & (sp_mask)), (val));\
567}
568
569#define POPW(ssp, sp, sp_mask, val)\
570{\
571 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
572 sp += 2;\
573}
574
575#define POPL(ssp, sp, sp_mask, val)\
576{\
577 val = ldl_kernel((ssp) + (sp & (sp_mask)));\
578 sp += 4;\
579}
580
2c0262af
FB
581/* protected mode interrupt */
582static void do_interrupt_protected(int intno, int is_int, int error_code,
583 unsigned int next_eip, int is_hw)
584{
585 SegmentCache *dt;
586 uint8_t *ptr, *ssp;
891b38e4 587 int type, dpl, selector, ss_dpl, cpl, sp_mask;
2c0262af 588 int has_error_code, new_stack, shift;
891b38e4
FB
589 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
590 uint32_t old_eip;
2c0262af 591
7e84c249
FB
592 has_error_code = 0;
593 if (!is_int && !is_hw) {
594 switch(intno) {
595 case 8:
596 case 10:
597 case 11:
598 case 12:
599 case 13:
600 case 14:
601 case 17:
602 has_error_code = 1;
603 break;
604 }
605 }
606
2c0262af
FB
607 dt = &env->idt;
608 if (intno * 8 + 7 > dt->limit)
609 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
610 ptr = dt->base + intno * 8;
61382a50
FB
611 e1 = ldl_kernel(ptr);
612 e2 = ldl_kernel(ptr + 4);
2c0262af
FB
613 /* check gate type */
614 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
615 switch(type) {
616 case 5: /* task gate */
7e84c249
FB
617 /* must do that check here to return the correct error code */
618 if (!(e2 & DESC_P_MASK))
619 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
620 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL);
621 if (has_error_code) {
622 int mask;
623 /* push the error code */
624 shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
625 if (env->segs[R_SS].flags & DESC_B_MASK)
626 mask = 0xffffffff;
627 else
628 mask = 0xffff;
629 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
630 ssp = env->segs[R_SS].base + esp;
631 if (shift)
632 stl_kernel(ssp, error_code);
633 else
634 stw_kernel(ssp, error_code);
635 env->regs[R_ESP] = (esp & mask) | (env->regs[R_ESP] & ~mask);
636 }
637 return;
2c0262af
FB
638 case 6: /* 286 interrupt gate */
639 case 7: /* 286 trap gate */
640 case 14: /* 386 interrupt gate */
641 case 15: /* 386 trap gate */
642 break;
643 default:
644 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
645 break;
646 }
647 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
648 cpl = env->hflags & HF_CPL_MASK;
649 /* check privledge if software int */
650 if (is_int && dpl < cpl)
651 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
652 /* check valid bit */
653 if (!(e2 & DESC_P_MASK))
654 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
655 selector = e1 >> 16;
656 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
657 if ((selector & 0xfffc) == 0)
658 raise_exception_err(EXCP0D_GPF, 0);
659
660 if (load_segment(&e1, &e2, selector) != 0)
661 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
662 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
663 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
664 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
665 if (dpl > cpl)
666 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
667 if (!(e2 & DESC_P_MASK))
668 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
669 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
670 /* to inner priviledge */
671 get_ss_esp_from_tss(&ss, &esp, dpl);
672 if ((ss & 0xfffc) == 0)
673 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
674 if ((ss & 3) != dpl)
675 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
676 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
677 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
678 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
679 if (ss_dpl != dpl)
680 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
681 if (!(ss_e2 & DESC_S_MASK) ||
682 (ss_e2 & DESC_CS_MASK) ||
683 !(ss_e2 & DESC_W_MASK))
684 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
685 if (!(ss_e2 & DESC_P_MASK))
686 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
687 new_stack = 1;
891b38e4
FB
688 sp_mask = get_sp_mask(ss_e2);
689 ssp = get_seg_base(ss_e1, ss_e2);
2c0262af
FB
690 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
691 /* to same priviledge */
8e682019
FB
692 if (env->eflags & VM_MASK)
693 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2c0262af 694 new_stack = 0;
891b38e4
FB
695 sp_mask = get_sp_mask(env->segs[R_SS].flags);
696 ssp = env->segs[R_SS].base;
697 esp = ESP;
4796f5e9 698 dpl = cpl;
2c0262af
FB
699 } else {
700 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
701 new_stack = 0; /* avoid warning */
891b38e4
FB
702 sp_mask = 0; /* avoid warning */
703 ssp = NULL; /* avoid warning */
704 esp = 0; /* avoid warning */
2c0262af
FB
705 }
706
707 shift = type >> 3;
891b38e4
FB
708
709#if 0
710 /* XXX: check that enough room is available */
2c0262af
FB
711 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
712 if (env->eflags & VM_MASK)
713 push_size += 8;
714 push_size <<= shift;
891b38e4 715#endif
2c0262af
FB
716 if (is_int)
717 old_eip = next_eip;
718 else
719 old_eip = env->eip;
2c0262af 720 if (shift == 1) {
2c0262af 721 if (new_stack) {
8e682019
FB
722 if (env->eflags & VM_MASK) {
723 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
724 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
725 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
726 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
727 }
891b38e4
FB
728 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
729 PUSHL(ssp, esp, sp_mask, ESP);
2c0262af 730 }
891b38e4
FB
731 PUSHL(ssp, esp, sp_mask, compute_eflags());
732 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
733 PUSHL(ssp, esp, sp_mask, old_eip);
2c0262af 734 if (has_error_code) {
891b38e4 735 PUSHL(ssp, esp, sp_mask, error_code);
2c0262af
FB
736 }
737 } else {
738 if (new_stack) {
8e682019
FB
739 if (env->eflags & VM_MASK) {
740 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
741 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
742 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
743 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
744 }
891b38e4
FB
745 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
746 PUSHW(ssp, esp, sp_mask, ESP);
2c0262af 747 }
891b38e4
FB
748 PUSHW(ssp, esp, sp_mask, compute_eflags());
749 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
750 PUSHW(ssp, esp, sp_mask, old_eip);
2c0262af 751 if (has_error_code) {
891b38e4 752 PUSHW(ssp, esp, sp_mask, error_code);
2c0262af
FB
753 }
754 }
755
891b38e4 756 if (new_stack) {
8e682019
FB
757 if (env->eflags & VM_MASK) {
758 /* XXX: explain me why W2K hangs if the whole segment cache is
759 reset ? */
8145122b 760#if 1
8e682019
FB
761 env->segs[R_ES].selector = 0;
762 env->segs[R_ES].flags = 0;
763 env->segs[R_DS].selector = 0;
764 env->segs[R_DS].flags = 0;
765 env->segs[R_FS].selector = 0;
766 env->segs[R_FS].flags = 0;
767 env->segs[R_GS].selector = 0;
768 env->segs[R_GS].flags = 0;
8145122b
FB
769#else
770 cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0, 0);
771 cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0, 0);
772 cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0, 0);
773 cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0, 0);
774#endif
8e682019 775 }
891b38e4
FB
776 ss = (ss & ~3) | dpl;
777 cpu_x86_load_seg_cache(env, R_SS, ss,
778 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
779 }
780 ESP = (ESP & ~sp_mask) | (esp & sp_mask);
781
782 selector = (selector & ~3) | dpl;
783 cpu_x86_load_seg_cache(env, R_CS, selector,
784 get_seg_base(e1, e2),
785 get_seg_limit(e1, e2),
786 e2);
787 cpu_x86_set_cpl(env, dpl);
788 env->eip = offset;
789
2c0262af
FB
790 /* interrupt gate clear IF mask */
791 if ((type & 1) == 0) {
792 env->eflags &= ~IF_MASK;
793 }
794 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
795}
796
797/* real mode interrupt */
798static void do_interrupt_real(int intno, int is_int, int error_code,
4136f33c 799 unsigned int next_eip)
2c0262af
FB
800{
801 SegmentCache *dt;
802 uint8_t *ptr, *ssp;
803 int selector;
804 uint32_t offset, esp;
805 uint32_t old_cs, old_eip;
806
807 /* real mode (simpler !) */
808 dt = &env->idt;
809 if (intno * 4 + 3 > dt->limit)
810 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
811 ptr = dt->base + intno * 4;
61382a50
FB
812 offset = lduw_kernel(ptr);
813 selector = lduw_kernel(ptr + 2);
2c0262af
FB
814 esp = ESP;
815 ssp = env->segs[R_SS].base;
816 if (is_int)
817 old_eip = next_eip;
818 else
819 old_eip = env->eip;
820 old_cs = env->segs[R_CS].selector;
891b38e4
FB
821 /* XXX: use SS segment size ? */
822 PUSHW(ssp, esp, 0xffff, compute_eflags());
823 PUSHW(ssp, esp, 0xffff, old_cs);
824 PUSHW(ssp, esp, 0xffff, old_eip);
2c0262af
FB
825
826 /* update processor state */
827 ESP = (ESP & ~0xffff) | (esp & 0xffff);
828 env->eip = offset;
829 env->segs[R_CS].selector = selector;
830 env->segs[R_CS].base = (uint8_t *)(selector << 4);
831 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
832}
833
834/* fake user mode interrupt */
835void do_interrupt_user(int intno, int is_int, int error_code,
836 unsigned int next_eip)
837{
838 SegmentCache *dt;
839 uint8_t *ptr;
840 int dpl, cpl;
841 uint32_t e2;
842
843 dt = &env->idt;
844 ptr = dt->base + (intno * 8);
61382a50 845 e2 = ldl_kernel(ptr + 4);
2c0262af
FB
846
847 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
848 cpl = env->hflags & HF_CPL_MASK;
849 /* check privledge if software int */
850 if (is_int && dpl < cpl)
851 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
852
853 /* Since we emulate only user space, we cannot do more than
854 exiting the emulation with the suitable exception and error
855 code */
856 if (is_int)
857 EIP = next_eip;
858}
859
860/*
861 * Begin excution of an interruption. is_int is TRUE if coming from
862 * the int instruction. next_eip is the EIP value AFTER the interrupt
863 * instruction. It is only relevant if is_int is TRUE.
864 */
865void do_interrupt(int intno, int is_int, int error_code,
866 unsigned int next_eip, int is_hw)
867{
8e682019
FB
868#if 0
869 {
870 extern FILE *stdout;
871 static int count;
58fe2f10 872 if (env->cr[0] & CR0_PE_MASK) {
8145122b 873 fprintf(stdout, "%d: v=%02x e=%04x i=%d CPL=%d CS:EIP=%04x:%08x SS:ESP=%04x:%08x",
dc6f57fd
FB
874 count, intno, error_code, is_int,
875 env->hflags & HF_CPL_MASK,
876 env->segs[R_CS].selector, EIP,
8145122b
FB
877 env->segs[R_SS].selector, ESP);
878 if (intno == 0x0e) {
879 fprintf(stdout, " CR2=%08x", env->cr[2]);
880 } else {
881 fprintf(stdout, " EAX=%08x", env->regs[R_EAX]);
882 }
883 fprintf(stdout, "\n");
884
dc6f57fd
FB
885 if (0) {
886 cpu_x86_dump_state(env, stdout, X86_DUMP_CCOP);
887#if 0
888 {
889 int i;
890 uint8_t *ptr;
891 fprintf(stdout, " code=");
892 ptr = env->segs[R_CS].base + env->eip;
893 for(i = 0; i < 16; i++) {
894 fprintf(stdout, " %02x", ldub(ptr + i));
895 }
896 fprintf(stdout, "\n");
897 }
898#endif
899 }
8e682019
FB
900 count++;
901 }
902 }
8e682019 903#endif
4136f33c
FB
904#ifdef DEBUG_PCALL
905 if (loglevel) {
906 static int count;
907 fprintf(logfile, "%d: interrupt: vector=%02x error_code=%04x int=%d\n",
908 count, intno, error_code, is_int);
909 cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
dc6f57fd 910#if 0
4136f33c
FB
911 {
912 int i;
913 uint8_t *ptr;
8e682019 914 fprintf(logfile, " code=");
4136f33c
FB
915 ptr = env->segs[R_CS].base + env->eip;
916 for(i = 0; i < 16; i++) {
8e682019 917 fprintf(logfile, " %02x", ldub(ptr + i));
4136f33c 918 }
8e682019 919 fprintf(logfile, "\n");
4136f33c
FB
920 }
921#endif
922 count++;
923 }
924#endif
2c0262af
FB
925 if (env->cr[0] & CR0_PE_MASK) {
926 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
927 } else {
928 do_interrupt_real(intno, is_int, error_code, next_eip);
929 }
930}
931
932/*
933 * Signal an interruption. It is executed in the main CPU loop.
934 * is_int is TRUE if coming from the int instruction. next_eip is the
935 * EIP value AFTER the interrupt instruction. It is only relevant if
936 * is_int is TRUE.
937 */
938void raise_interrupt(int intno, int is_int, int error_code,
939 unsigned int next_eip)
940{
941 env->exception_index = intno;
942 env->error_code = error_code;
943 env->exception_is_int = is_int;
944 env->exception_next_eip = next_eip;
945 cpu_loop_exit();
946}
947
/* shortcuts to generate exceptions */

/* The parentheses around the name prevent expansion of the optional
   debugging macro of the same name defined at the top of this file. */
void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
959
#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
/* Unsigned 64/32 divide: quotient via *q_ptr, remainder returned. */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

/* Signed 64/32 divide: quotient via *q_ptr, remainder returned. */
int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif
975
976void helper_divl_EAX_T0(uint32_t eip)
977{
978 unsigned int den, q, r;
979 uint64_t num;
980
981 num = EAX | ((uint64_t)EDX << 32);
982 den = T0;
983 if (den == 0) {
984 EIP = eip;
985 raise_exception(EXCP00_DIVZ);
986 }
987#ifdef BUGGY_GCC_DIV64
988 r = div64(&q, num, den);
989#else
990 q = (num / den);
991 r = (num % den);
992#endif
993 EAX = q;
994 EDX = r;
995}
996
997void helper_idivl_EAX_T0(uint32_t eip)
998{
999 int den, q, r;
1000 int64_t num;
1001
1002 num = EAX | ((uint64_t)EDX << 32);
1003 den = T0;
1004 if (den == 0) {
1005 EIP = eip;
1006 raise_exception(EXCP00_DIVZ);
1007 }
1008#ifdef BUGGY_GCC_DIV64
1009 r = idiv64(&q, num, den);
1010#else
1011 q = (num / den);
1012 r = (num % den);
1013#endif
1014 EAX = q;
1015 EDX = r;
1016}
1017
1018void helper_cmpxchg8b(void)
1019{
1020 uint64_t d;
1021 int eflags;
1022
1023 eflags = cc_table[CC_OP].compute_all();
1024 d = ldq((uint8_t *)A0);
1025 if (d == (((uint64_t)EDX << 32) | EAX)) {
1026 stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
1027 eflags |= CC_Z;
1028 } else {
1029 EDX = d >> 32;
1030 EAX = d;
1031 eflags &= ~CC_Z;
1032 }
1033 CC_SRC = eflags;
1034}
1035
/* CPUID.01H:EDX feature bits (values fixed by the x86 architecture) */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)
/* ... */
#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
1057void helper_cpuid(void)
1058{
8e682019
FB
1059 switch(EAX) {
1060 case 0:
1061 EAX = 2; /* max EAX index supported */
2c0262af
FB
1062 EBX = 0x756e6547;
1063 ECX = 0x6c65746e;
1064 EDX = 0x49656e69;
8e682019
FB
1065 break;
1066 case 1:
1067 {
1068 int family, model, stepping;
1069 /* EAX = 1 info */
2c0262af 1070#if 0
8e682019
FB
1071 /* pentium 75-200 */
1072 family = 5;
1073 model = 2;
1074 stepping = 11;
2c0262af 1075#else
8e682019
FB
1076 /* pentium pro */
1077 family = 6;
1078 model = 1;
1079 stepping = 3;
2c0262af 1080#endif
8e682019
FB
1081 EAX = (family << 8) | (model << 4) | stepping;
1082 EBX = 0;
1083 ECX = 0;
1084 EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
1085 CPUID_TSC | CPUID_MSR | CPUID_MCE |
1086 CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
1087 }
1088 break;
1089 default:
1090 /* cache info: needed for Pentium Pro compatibility */
1091 EAX = 0x410601;
2c0262af
FB
1092 EBX = 0;
1093 ECX = 0;
8e682019
FB
1094 EDX = 0;
1095 break;
2c0262af
FB
1096 }
1097}
1098
1099void helper_lldt_T0(void)
1100{
1101 int selector;
1102 SegmentCache *dt;
1103 uint32_t e1, e2;
1104 int index;
1105 uint8_t *ptr;
1106
1107 selector = T0 & 0xffff;
1108 if ((selector & 0xfffc) == 0) {
1109 /* XXX: NULL selector case: invalid LDT */
1110 env->ldt.base = NULL;
1111 env->ldt.limit = 0;
1112 } else {
1113 if (selector & 0x4)
1114 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1115 dt = &env->gdt;
1116 index = selector & ~7;
1117 if ((index + 7) > dt->limit)
1118 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1119 ptr = dt->base + index;
61382a50
FB
1120 e1 = ldl_kernel(ptr);
1121 e2 = ldl_kernel(ptr + 4);
2c0262af
FB
1122 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
1123 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1124 if (!(e2 & DESC_P_MASK))
1125 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1126 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1127 }
1128 env->ldt.selector = selector;
1129}
1130
1131void helper_ltr_T0(void)
1132{
1133 int selector;
1134 SegmentCache *dt;
1135 uint32_t e1, e2;
1136 int index, type;
1137 uint8_t *ptr;
1138
1139 selector = T0 & 0xffff;
1140 if ((selector & 0xfffc) == 0) {
1141 /* NULL selector case: invalid LDT */
1142 env->tr.base = NULL;
1143 env->tr.limit = 0;
1144 env->tr.flags = 0;
1145 } else {
1146 if (selector & 0x4)
1147 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1148 dt = &env->gdt;
1149 index = selector & ~7;
1150 if ((index + 7) > dt->limit)
1151 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1152 ptr = dt->base + index;
61382a50
FB
1153 e1 = ldl_kernel(ptr);
1154 e2 = ldl_kernel(ptr + 4);
2c0262af
FB
1155 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1156 if ((e2 & DESC_S_MASK) ||
7e84c249 1157 (type != 1 && type != 9))
2c0262af
FB
1158 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1159 if (!(e2 & DESC_P_MASK))
1160 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1161 load_seg_cache_raw_dt(&env->tr, e1, e2);
8e682019 1162 e2 |= DESC_TSS_BUSY_MASK;
61382a50 1163 stl_kernel(ptr + 4, e2);
2c0262af
FB
1164 }
1165 env->tr.selector = selector;
1166}
1167
3ab493de 1168/* only works if protected mode and not VM86. seg_reg must be != R_CS */
8e682019 1169void load_seg(int seg_reg, int selector)
2c0262af
FB
1170{
1171 uint32_t e1, e2;
3ab493de
FB
1172 int cpl, dpl, rpl;
1173 SegmentCache *dt;
1174 int index;
1175 uint8_t *ptr;
1176
8e682019 1177 selector &= 0xffff;
2c0262af
FB
1178 if ((selector & 0xfffc) == 0) {
1179 /* null selector case */
8e682019 1180 if (seg_reg == R_SS)
2c0262af 1181 raise_exception_err(EXCP0D_GPF, 0);
8e682019 1182 cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
2c0262af 1183 } else {
3ab493de
FB
1184
1185 if (selector & 0x4)
1186 dt = &env->ldt;
1187 else
1188 dt = &env->gdt;
1189 index = selector & ~7;
8e682019 1190 if ((index + 7) > dt->limit)
2c0262af 1191 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3ab493de
FB
1192 ptr = dt->base + index;
1193 e1 = ldl_kernel(ptr);
1194 e2 = ldl_kernel(ptr + 4);
1195
8e682019 1196 if (!(e2 & DESC_S_MASK))
2c0262af 1197 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3ab493de
FB
1198 rpl = selector & 3;
1199 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1200 cpl = env->hflags & HF_CPL_MASK;
2c0262af 1201 if (seg_reg == R_SS) {
3ab493de 1202 /* must be writable segment */
8e682019 1203 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2c0262af 1204 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
8e682019 1205 if (rpl != cpl || dpl != cpl)
3ab493de 1206 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2c0262af 1207 } else {
3ab493de 1208 /* must be readable segment */
8e682019 1209 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2c0262af 1210 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3ab493de
FB
1211
1212 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1213 /* if not conforming code, test rights */
8e682019 1214 if (dpl < cpl || dpl < rpl)
3ab493de 1215 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3ab493de 1216 }
2c0262af
FB
1217 }
1218
1219 if (!(e2 & DESC_P_MASK)) {
2c0262af
FB
1220 if (seg_reg == R_SS)
1221 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
1222 else
1223 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1224 }
3ab493de
FB
1225
1226 /* set the access bit if not already set */
1227 if (!(e2 & DESC_A_MASK)) {
1228 e2 |= DESC_A_MASK;
1229 stl_kernel(ptr + 4, e2);
1230 }
1231
2c0262af
FB
1232 cpu_x86_load_seg_cache(env, seg_reg, selector,
1233 get_seg_base(e1, e2),
1234 get_seg_limit(e1, e2),
1235 e2);
1236#if 0
1237 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1238 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1239#endif
1240 }
1241}
1242
1243/* protected mode jump */
1244void helper_ljmp_protected_T0_T1(void)
1245{
7e84c249 1246 int new_cs, new_eip, gate_cs, type;
2c0262af
FB
1247 uint32_t e1, e2, cpl, dpl, rpl, limit;
1248
1249 new_cs = T0;
1250 new_eip = T1;
1251 if ((new_cs & 0xfffc) == 0)
1252 raise_exception_err(EXCP0D_GPF, 0);
1253 if (load_segment(&e1, &e2, new_cs) != 0)
1254 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1255 cpl = env->hflags & HF_CPL_MASK;
1256 if (e2 & DESC_S_MASK) {
1257 if (!(e2 & DESC_CS_MASK))
1258 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1259 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
7e84c249 1260 if (e2 & DESC_C_MASK) {
2c0262af
FB
1261 /* conforming code segment */
1262 if (dpl > cpl)
1263 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1264 } else {
1265 /* non conforming code segment */
1266 rpl = new_cs & 3;
1267 if (rpl > cpl)
1268 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1269 if (dpl != cpl)
1270 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1271 }
1272 if (!(e2 & DESC_P_MASK))
1273 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
1274 limit = get_seg_limit(e1, e2);
1275 if (new_eip > limit)
1276 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1277 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1278 get_seg_base(e1, e2), limit, e2);
1279 EIP = new_eip;
1280 } else {
7e84c249
FB
1281 /* jump to call or task gate */
1282 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1283 rpl = new_cs & 3;
1284 cpl = env->hflags & HF_CPL_MASK;
1285 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1286 switch(type) {
1287 case 1: /* 286 TSS */
1288 case 9: /* 386 TSS */
1289 case 5: /* task gate */
1290 if (dpl < cpl || dpl < rpl)
1291 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1292 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP);
1293 break;
1294 case 4: /* 286 call gate */
1295 case 12: /* 386 call gate */
1296 if ((dpl < cpl) || (dpl < rpl))
1297 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1298 if (!(e2 & DESC_P_MASK))
1299 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
1300 gate_cs = e1 >> 16;
1301 if (load_segment(&e1, &e2, gate_cs) != 0)
1302 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
1303 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1304 /* must be code segment */
1305 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
1306 (DESC_S_MASK | DESC_CS_MASK)))
1307 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
1308 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
1309 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
1310 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
1311 if (!(e2 & DESC_P_MASK))
1312 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
1313 new_eip = (e1 & 0xffff);
1314 if (type == 12)
1315 new_eip |= (e2 & 0xffff0000);
1316 limit = get_seg_limit(e1, e2);
1317 if (new_eip > limit)
1318 raise_exception_err(EXCP0D_GPF, 0);
1319 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1320 get_seg_base(e1, e2), limit, e2);
1321 EIP = new_eip;
1322 break;
1323 default:
1324 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1325 break;
1326 }
2c0262af
FB
1327 }
1328}
1329
1330/* real mode call */
1331void helper_lcall_real_T0_T1(int shift, int next_eip)
1332{
1333 int new_cs, new_eip;
1334 uint32_t esp, esp_mask;
1335 uint8_t *ssp;
1336
1337 new_cs = T0;
1338 new_eip = T1;
1339 esp = ESP;
891b38e4 1340 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2c0262af
FB
1341 ssp = env->segs[R_SS].base;
1342 if (shift) {
891b38e4
FB
1343 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
1344 PUSHL(ssp, esp, esp_mask, next_eip);
2c0262af 1345 } else {
891b38e4
FB
1346 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
1347 PUSHW(ssp, esp, esp_mask, next_eip);
2c0262af
FB
1348 }
1349
891b38e4 1350 ESP = (ESP & ~esp_mask) | (esp & esp_mask);
2c0262af
FB
1351 env->eip = new_eip;
1352 env->segs[R_CS].selector = new_cs;
1353 env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
1354}
1355
1356/* protected mode call */
1357void helper_lcall_protected_T0_T1(int shift, int next_eip)
1358{
891b38e4 1359 int new_cs, new_eip, new_stack, i;
2c0262af 1360 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
891b38e4
FB
1361 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
1362 uint32_t val, limit, old_sp_mask;
2c0262af
FB
1363 uint8_t *ssp, *old_ssp;
1364
1365 new_cs = T0;
1366 new_eip = T1;
f3f2d9be
FB
1367#ifdef DEBUG_PCALL
1368 if (loglevel) {
1369 fprintf(logfile, "lcall %04x:%08x\n",
1370 new_cs, new_eip);
4136f33c 1371 cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
f3f2d9be
FB
1372 }
1373#endif
2c0262af
FB
1374 if ((new_cs & 0xfffc) == 0)
1375 raise_exception_err(EXCP0D_GPF, 0);
1376 if (load_segment(&e1, &e2, new_cs) != 0)
1377 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1378 cpl = env->hflags & HF_CPL_MASK;
f3f2d9be
FB
1379#ifdef DEBUG_PCALL
1380 if (loglevel) {
1381 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
1382 }
1383#endif
2c0262af
FB
1384 if (e2 & DESC_S_MASK) {
1385 if (!(e2 & DESC_CS_MASK))
1386 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1387 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
7e84c249 1388 if (e2 & DESC_C_MASK) {
2c0262af
FB
1389 /* conforming code segment */
1390 if (dpl > cpl)
1391 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1392 } else {
1393 /* non conforming code segment */
1394 rpl = new_cs & 3;
1395 if (rpl > cpl)
1396 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1397 if (dpl != cpl)
1398 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1399 }
1400 if (!(e2 & DESC_P_MASK))
1401 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
1402
1403 sp = ESP;
891b38e4
FB
1404 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1405 ssp = env->segs[R_SS].base;
2c0262af 1406 if (shift) {
891b38e4
FB
1407 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1408 PUSHL(ssp, sp, sp_mask, next_eip);
2c0262af 1409 } else {
891b38e4
FB
1410 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1411 PUSHW(ssp, sp, sp_mask, next_eip);
2c0262af 1412 }
2c0262af
FB
1413
1414 limit = get_seg_limit(e1, e2);
1415 if (new_eip > limit)
1416 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1417 /* from this point, not restartable */
891b38e4 1418 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2c0262af
FB
1419 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1420 get_seg_base(e1, e2), limit, e2);
1421 EIP = new_eip;
1422 } else {
1423 /* check gate type */
1424 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
7e84c249
FB
1425 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1426 rpl = new_cs & 3;
2c0262af
FB
1427 switch(type) {
1428 case 1: /* available 286 TSS */
1429 case 9: /* available 386 TSS */
1430 case 5: /* task gate */
7e84c249
FB
1431 if (dpl < cpl || dpl < rpl)
1432 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1433 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL);
8145122b 1434 return;
2c0262af
FB
1435 case 4: /* 286 call gate */
1436 case 12: /* 386 call gate */
1437 break;
1438 default:
1439 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1440 break;
1441 }
1442 shift = type >> 3;
1443
2c0262af
FB
1444 if (dpl < cpl || dpl < rpl)
1445 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1446 /* check valid bit */
1447 if (!(e2 & DESC_P_MASK))
1448 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
1449 selector = e1 >> 16;
1450 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
f3f2d9be 1451 param_count = e2 & 0x1f;
2c0262af
FB
1452 if ((selector & 0xfffc) == 0)
1453 raise_exception_err(EXCP0D_GPF, 0);
1454
1455 if (load_segment(&e1, &e2, selector) != 0)
1456 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1457 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
1458 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1459 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1460 if (dpl > cpl)
1461 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1462 if (!(e2 & DESC_P_MASK))
1463 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1464
1465 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1466 /* to inner priviledge */
1467 get_ss_esp_from_tss(&ss, &sp, dpl);
f3f2d9be
FB
1468#ifdef DEBUG_PCALL
1469 if (loglevel)
1470 fprintf(logfile, "ss=%04x sp=%04x param_count=%d ESP=%x\n",
1471 ss, sp, param_count, ESP);
1472#endif
2c0262af
FB
1473 if ((ss & 0xfffc) == 0)
1474 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1475 if ((ss & 3) != dpl)
1476 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1477 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
1478 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1479 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1480 if (ss_dpl != dpl)
1481 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1482 if (!(ss_e2 & DESC_S_MASK) ||
1483 (ss_e2 & DESC_CS_MASK) ||
1484 !(ss_e2 & DESC_W_MASK))
1485 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1486 if (!(ss_e2 & DESC_P_MASK))
1487 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
1488
891b38e4 1489 // push_size = ((param_count * 2) + 8) << shift;
2c0262af 1490
891b38e4
FB
1491 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1492 old_ssp = env->segs[R_SS].base;
2c0262af 1493
891b38e4
FB
1494 sp_mask = get_sp_mask(ss_e2);
1495 ssp = get_seg_base(ss_e1, ss_e2);
2c0262af 1496 if (shift) {
891b38e4
FB
1497 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
1498 PUSHL(ssp, sp, sp_mask, ESP);
1499 for(i = param_count - 1; i >= 0; i--) {
1500 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
1501 PUSHL(ssp, sp, sp_mask, val);
2c0262af
FB
1502 }
1503 } else {
891b38e4
FB
1504 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
1505 PUSHW(ssp, sp, sp_mask, ESP);
1506 for(i = param_count - 1; i >= 0; i--) {
1507 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
1508 PUSHW(ssp, sp, sp_mask, val);
2c0262af
FB
1509 }
1510 }
891b38e4 1511 new_stack = 1;
2c0262af
FB
1512 } else {
1513 /* to same priviledge */
891b38e4
FB
1514 sp = ESP;
1515 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1516 ssp = env->segs[R_SS].base;
1517 // push_size = (4 << shift);
1518 new_stack = 0;
2c0262af
FB
1519 }
1520
1521 if (shift) {
891b38e4
FB
1522 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1523 PUSHL(ssp, sp, sp_mask, next_eip);
2c0262af 1524 } else {
891b38e4
FB
1525 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1526 PUSHW(ssp, sp, sp_mask, next_eip);
1527 }
1528
1529 /* from this point, not restartable */
1530
1531 if (new_stack) {
1532 ss = (ss & ~3) | dpl;
1533 cpu_x86_load_seg_cache(env, R_SS, ss,
1534 ssp,
1535 get_seg_limit(ss_e1, ss_e2),
1536 ss_e2);
2c0262af
FB
1537 }
1538
2c0262af
FB
1539 selector = (selector & ~3) | dpl;
1540 cpu_x86_load_seg_cache(env, R_CS, selector,
1541 get_seg_base(e1, e2),
1542 get_seg_limit(e1, e2),
1543 e2);
1544 cpu_x86_set_cpl(env, dpl);
891b38e4 1545 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2c0262af
FB
1546 EIP = offset;
1547 }
1548}
1549
7e84c249 1550/* real and vm86 mode iret */
2c0262af
FB
1551void helper_iret_real(int shift)
1552{
891b38e4 1553 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2c0262af
FB
1554 uint8_t *ssp;
1555 int eflags_mask;
7e84c249 1556
891b38e4
FB
1557 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
1558 sp = ESP;
1559 ssp = env->segs[R_SS].base;
2c0262af
FB
1560 if (shift == 1) {
1561 /* 32 bits */
891b38e4
FB
1562 POPL(ssp, sp, sp_mask, new_eip);
1563 POPL(ssp, sp, sp_mask, new_cs);
1564 new_cs &= 0xffff;
1565 POPL(ssp, sp, sp_mask, new_eflags);
2c0262af
FB
1566 } else {
1567 /* 16 bits */
891b38e4
FB
1568 POPW(ssp, sp, sp_mask, new_eip);
1569 POPW(ssp, sp, sp_mask, new_cs);
1570 POPW(ssp, sp, sp_mask, new_eflags);
2c0262af 1571 }
4136f33c 1572 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2c0262af
FB
1573 load_seg_vm(R_CS, new_cs);
1574 env->eip = new_eip;
7e84c249 1575 if (env->eflags & VM_MASK)
8145122b 1576 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
7e84c249 1577 else
8145122b 1578 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2c0262af
FB
1579 if (shift == 0)
1580 eflags_mask &= 0xffff;
1581 load_eflags(new_eflags, eflags_mask);
1582}
1583
8e682019
FB
1584static inline void validate_seg(int seg_reg, int cpl)
1585{
1586 int dpl;
1587 uint32_t e2;
1588
1589 e2 = env->segs[seg_reg].flags;
1590 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1591 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1592 /* data or non conforming code segment */
1593 if (dpl < cpl) {
1594 cpu_x86_load_seg_cache(env, seg_reg, 0, NULL, 0, 0);
1595 }
1596 }
1597}
1598
2c0262af
FB
1599/* protected mode iret */
1600static inline void helper_ret_protected(int shift, int is_iret, int addend)
1601{
891b38e4 1602 uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss, sp_mask;
2c0262af
FB
1603 uint32_t new_es, new_ds, new_fs, new_gs;
1604 uint32_t e1, e2, ss_e1, ss_e2;
4136f33c 1605 int cpl, dpl, rpl, eflags_mask, iopl;
2c0262af
FB
1606 uint8_t *ssp;
1607
891b38e4 1608 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2c0262af 1609 sp = ESP;
891b38e4 1610 ssp = env->segs[R_SS].base;
2c0262af
FB
1611 if (shift == 1) {
1612 /* 32 bits */
891b38e4
FB
1613 POPL(ssp, sp, sp_mask, new_eip);
1614 POPL(ssp, sp, sp_mask, new_cs);
1615 new_cs &= 0xffff;
1616 if (is_iret) {
1617 POPL(ssp, sp, sp_mask, new_eflags);
1618 if (new_eflags & VM_MASK)
1619 goto return_to_vm86;
1620 }
2c0262af
FB
1621 } else {
1622 /* 16 bits */
891b38e4
FB
1623 POPW(ssp, sp, sp_mask, new_eip);
1624 POPW(ssp, sp, sp_mask, new_cs);
2c0262af 1625 if (is_iret)
891b38e4 1626 POPW(ssp, sp, sp_mask, new_eflags);
2c0262af 1627 }
891b38e4
FB
1628#ifdef DEBUG_PCALL
1629 if (loglevel) {
4136f33c
FB
1630 fprintf(logfile, "lret new %04x:%08x addend=0x%x\n",
1631 new_cs, new_eip, addend);
1632 cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
891b38e4
FB
1633 }
1634#endif
2c0262af
FB
1635 if ((new_cs & 0xfffc) == 0)
1636 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1637 if (load_segment(&e1, &e2, new_cs) != 0)
1638 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1639 if (!(e2 & DESC_S_MASK) ||
1640 !(e2 & DESC_CS_MASK))
1641 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1642 cpl = env->hflags & HF_CPL_MASK;
1643 rpl = new_cs & 3;
1644 if (rpl < cpl)
1645 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1646 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
7e84c249 1647 if (e2 & DESC_C_MASK) {
2c0262af
FB
1648 if (dpl > rpl)
1649 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1650 } else {
1651 if (dpl != rpl)
1652 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1653 }
1654 if (!(e2 & DESC_P_MASK))
1655 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
1656
891b38e4 1657 sp += addend;
2c0262af
FB
1658 if (rpl == cpl) {
1659 /* return to same priledge level */
1660 cpu_x86_load_seg_cache(env, R_CS, new_cs,
1661 get_seg_base(e1, e2),
1662 get_seg_limit(e1, e2),
1663 e2);
2c0262af
FB
1664 } else {
1665 /* return to different priviledge level */
2c0262af
FB
1666 if (shift == 1) {
1667 /* 32 bits */
891b38e4
FB
1668 POPL(ssp, sp, sp_mask, new_esp);
1669 POPL(ssp, sp, sp_mask, new_ss);
1670 new_ss &= 0xffff;
2c0262af
FB
1671 } else {
1672 /* 16 bits */
891b38e4
FB
1673 POPW(ssp, sp, sp_mask, new_esp);
1674 POPW(ssp, sp, sp_mask, new_ss);
2c0262af
FB
1675 }
1676
1677 if ((new_ss & 3) != rpl)
1678 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
1679 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
1680 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
1681 if (!(ss_e2 & DESC_S_MASK) ||
1682 (ss_e2 & DESC_CS_MASK) ||
1683 !(ss_e2 & DESC_W_MASK))
1684 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
1685 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
1686 if (dpl != rpl)
1687 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
1688 if (!(ss_e2 & DESC_P_MASK))
1689 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
1690
1691 cpu_x86_load_seg_cache(env, R_CS, new_cs,
1692 get_seg_base(e1, e2),
1693 get_seg_limit(e1, e2),
1694 e2);
1695 cpu_x86_load_seg_cache(env, R_SS, new_ss,
1696 get_seg_base(ss_e1, ss_e2),
1697 get_seg_limit(ss_e1, ss_e2),
1698 ss_e2);
1699 cpu_x86_set_cpl(env, rpl);
891b38e4
FB
1700 sp = new_esp;
1701 /* XXX: change sp_mask according to old segment ? */
8e682019
FB
1702
1703 /* validate data segments */
1704 validate_seg(R_ES, cpl);
1705 validate_seg(R_DS, cpl);
1706 validate_seg(R_FS, cpl);
1707 validate_seg(R_GS, cpl);
2c0262af 1708 }
891b38e4 1709 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2c0262af
FB
1710 env->eip = new_eip;
1711 if (is_iret) {
4136f33c 1712 /* NOTE: 'cpl' is the _old_ CPL */
8145122b 1713 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2c0262af 1714 if (cpl == 0)
4136f33c
FB
1715 eflags_mask |= IOPL_MASK;
1716 iopl = (env->eflags >> IOPL_SHIFT) & 3;
1717 if (cpl <= iopl)
1718 eflags_mask |= IF_MASK;
2c0262af
FB
1719 if (shift == 0)
1720 eflags_mask &= 0xffff;
1721 load_eflags(new_eflags, eflags_mask);
1722 }
1723 return;
1724
1725 return_to_vm86:
891b38e4
FB
1726 POPL(ssp, sp, sp_mask, new_esp);
1727 POPL(ssp, sp, sp_mask, new_ss);
1728 POPL(ssp, sp, sp_mask, new_es);
1729 POPL(ssp, sp, sp_mask, new_ds);
1730 POPL(ssp, sp, sp_mask, new_fs);
1731 POPL(ssp, sp, sp_mask, new_gs);
2c0262af
FB
1732
1733 /* modify processor state */
4136f33c 1734 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
8145122b 1735 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
891b38e4 1736 load_seg_vm(R_CS, new_cs & 0xffff);
2c0262af 1737 cpu_x86_set_cpl(env, 3);
891b38e4
FB
1738 load_seg_vm(R_SS, new_ss & 0xffff);
1739 load_seg_vm(R_ES, new_es & 0xffff);
1740 load_seg_vm(R_DS, new_ds & 0xffff);
1741 load_seg_vm(R_FS, new_fs & 0xffff);
1742 load_seg_vm(R_GS, new_gs & 0xffff);
2c0262af
FB
1743
1744 env->eip = new_eip;
1745 ESP = new_esp;
1746}
1747
1748void helper_iret_protected(int shift)
1749{
7e84c249
FB
1750 int tss_selector, type;
1751 uint32_t e1, e2;
1752
1753 /* specific case for TSS */
1754 if (env->eflags & NT_MASK) {
1755 tss_selector = lduw_kernel(env->tr.base + 0);
1756 if (tss_selector & 4)
1757 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
1758 if (load_segment(&e1, &e2, tss_selector) != 0)
1759 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
1760 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
1761 /* NOTE: we check both segment and busy TSS */
1762 if (type != 3)
1763 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
1764 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET);
1765 } else {
1766 helper_ret_protected(shift, 1, 0);
1767 }
2c0262af
FB
1768}
1769
/* protected mode far RET (optionally "ret imm16" via addend) */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
1774
1775void helper_movl_crN_T0(int reg)
1776{
2c0262af
FB
1777 switch(reg) {
1778 case 0:
1ac157da 1779 cpu_x86_update_cr0(env, T0);
2c0262af
FB
1780 break;
1781 case 3:
1ac157da
FB
1782 cpu_x86_update_cr3(env, T0);
1783 break;
1784 case 4:
1785 cpu_x86_update_cr4(env, T0);
1786 break;
1787 default:
1788 env->cr[reg] = T0;
2c0262af
FB
1789 break;
1790 }
1791}
1792
1793/* XXX: do more */
1794void helper_movl_drN_T0(int reg)
1795{
1796 env->dr[reg] = T0;
1797}
1798
1799void helper_invlpg(unsigned int addr)
1800{
1801 cpu_x86_flush_tlb(env, addr);
1802}
1803
1804/* rdtsc */
bc51c5c9 1805#if !defined(__i386__) && !defined(__x86_64__)
2c0262af
FB
1806uint64_t emu_time;
1807#endif
1808
1809void helper_rdtsc(void)
1810{
1811 uint64_t val;
bc51c5c9 1812#if defined(__i386__) || defined(__x86_64__)
2c0262af
FB
1813 asm("rdtsc" : "=A" (val));
1814#else
1815 /* better than nothing: the time increases */
1816 val = emu_time++;
1817#endif
1818 EAX = val;
1819 EDX = val >> 32;
1820}
1821
1822void helper_wrmsr(void)
1823{
1824 switch(ECX) {
1825 case MSR_IA32_SYSENTER_CS:
1826 env->sysenter_cs = EAX & 0xffff;
1827 break;
1828 case MSR_IA32_SYSENTER_ESP:
1829 env->sysenter_esp = EAX;
1830 break;
1831 case MSR_IA32_SYSENTER_EIP:
1832 env->sysenter_eip = EAX;
1833 break;
1834 default:
1835 /* XXX: exception ? */
1836 break;
1837 }
1838}
1839
1840void helper_rdmsr(void)
1841{
1842 switch(ECX) {
1843 case MSR_IA32_SYSENTER_CS:
1844 EAX = env->sysenter_cs;
1845 EDX = 0;
1846 break;
1847 case MSR_IA32_SYSENTER_ESP:
1848 EAX = env->sysenter_esp;
1849 EDX = 0;
1850 break;
1851 case MSR_IA32_SYSENTER_EIP:
1852 EAX = env->sysenter_eip;
1853 EDX = 0;
1854 break;
1855 default:
1856 /* XXX: exception ? */
1857 break;
1858 }
1859}
1860
1861void helper_lsl(void)
1862{
1863 unsigned int selector, limit;
1864 uint32_t e1, e2;
3ab493de 1865 int rpl, dpl, cpl, type;
2c0262af
FB
1866
1867 CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1868 selector = T0 & 0xffff;
1869 if (load_segment(&e1, &e2, selector) != 0)
1870 return;
3ab493de
FB
1871 rpl = selector & 3;
1872 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1873 cpl = env->hflags & HF_CPL_MASK;
1874 if (e2 & DESC_S_MASK) {
1875 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
1876 /* conforming */
1877 } else {
1878 if (dpl < cpl || dpl < rpl)
1879 return;
1880 }
1881 } else {
1882 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1883 switch(type) {
1884 case 1:
1885 case 2:
1886 case 3:
1887 case 9:
1888 case 11:
1889 break;
1890 default:
1891 return;
1892 }
1893 if (dpl < cpl || dpl < rpl)
1894 return;
1895 }
1896 limit = get_seg_limit(e1, e2);
2c0262af
FB
1897 T1 = limit;
1898 CC_SRC |= CC_Z;
1899}
1900
1901void helper_lar(void)
1902{
1903 unsigned int selector;
1904 uint32_t e1, e2;
3ab493de 1905 int rpl, dpl, cpl, type;
2c0262af
FB
1906
1907 CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1908 selector = T0 & 0xffff;
3ab493de
FB
1909 if ((selector & 0xfffc) == 0)
1910 return;
2c0262af
FB
1911 if (load_segment(&e1, &e2, selector) != 0)
1912 return;
3ab493de
FB
1913 rpl = selector & 3;
1914 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1915 cpl = env->hflags & HF_CPL_MASK;
1916 if (e2 & DESC_S_MASK) {
1917 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
1918 /* conforming */
1919 } else {
1920 if (dpl < cpl || dpl < rpl)
1921 return;
1922 }
1923 } else {
1924 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1925 switch(type) {
1926 case 1:
1927 case 2:
1928 case 3:
1929 case 4:
1930 case 5:
1931 case 9:
1932 case 11:
1933 case 12:
1934 break;
1935 default:
1936 return;
1937 }
1938 if (dpl < cpl || dpl < rpl)
1939 return;
1940 }
2c0262af
FB
1941 T1 = e2 & 0x00f0ff00;
1942 CC_SRC |= CC_Z;
1943}
1944
3ab493de
FB
1945void helper_verr(void)
1946{
1947 unsigned int selector;
1948 uint32_t e1, e2;
1949 int rpl, dpl, cpl;
1950
1951 CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1952 selector = T0 & 0xffff;
1953 if ((selector & 0xfffc) == 0)
1954 return;
1955 if (load_segment(&e1, &e2, selector) != 0)
1956 return;
1957 if (!(e2 & DESC_S_MASK))
1958 return;
1959 rpl = selector & 3;
1960 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1961 cpl = env->hflags & HF_CPL_MASK;
1962 if (e2 & DESC_CS_MASK) {
1963 if (!(e2 & DESC_R_MASK))
1964 return;
1965 if (!(e2 & DESC_C_MASK)) {
1966 if (dpl < cpl || dpl < rpl)
1967 return;
1968 }
1969 } else {
1970 if (dpl < cpl || dpl < rpl)
1971 return;
1972 }
f3f2d9be 1973 CC_SRC |= CC_Z;
3ab493de
FB
1974}
1975
1976void helper_verw(void)
1977{
1978 unsigned int selector;
1979 uint32_t e1, e2;
1980 int rpl, dpl, cpl;
1981
1982 CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1983 selector = T0 & 0xffff;
1984 if ((selector & 0xfffc) == 0)
1985 return;
1986 if (load_segment(&e1, &e2, selector) != 0)
1987 return;
1988 if (!(e2 & DESC_S_MASK))
1989 return;
1990 rpl = selector & 3;
1991 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1992 cpl = env->hflags & HF_CPL_MASK;
1993 if (e2 & DESC_CS_MASK) {
1994 return;
1995 } else {
1996 if (dpl < cpl || dpl < rpl)
1997 return;
1998 if (!(e2 & DESC_W_MASK))
1999 return;
2000 }
f3f2d9be 2001 CC_SRC |= CC_Z;
3ab493de
FB
2002}
2003
2c0262af
FB
2004/* FPU helpers */
2005
2c0262af
FB
2006void helper_fldt_ST0_A0(void)
2007{
2008 int new_fpstt;
2009 new_fpstt = (env->fpstt - 1) & 7;
2010 env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
2011 env->fpstt = new_fpstt;
2012 env->fptags[new_fpstt] = 0; /* validate stack entry */
2013}
2014
2015void helper_fstt_ST0_A0(void)
2016{
2017 helper_fstt(ST0, (uint8_t *)A0);
2018}
2c0262af
FB
2019
2020/* BCD ops */
2021
2022#define MUL10(iv) ( iv + iv + (iv << 3) )
2023
2024void helper_fbld_ST0_A0(void)
2025{
2026 CPU86_LDouble tmp;
2027 uint64_t val;
2028 unsigned int v;
2029 int i;
2030
2031 val = 0;
2032 for(i = 8; i >= 0; i--) {
2033 v = ldub((uint8_t *)A0 + i);
2034 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
2035 }
2036 tmp = val;
2037 if (ldub((uint8_t *)A0 + 9) & 0x80)
2038 tmp = -tmp;
2039 fpush();
2040 ST0 = tmp;
2041}
2042
2043void helper_fbst_ST0_A0(void)
2044{
2045 CPU86_LDouble tmp;
2046 int v;
2047 uint8_t *mem_ref, *mem_end;
2048 int64_t val;
2049
2050 tmp = rint(ST0);
2051 val = (int64_t)tmp;
2052 mem_ref = (uint8_t *)A0;
2053 mem_end = mem_ref + 9;
2054 if (val < 0) {
2055 stb(mem_end, 0x80);
2056 val = -val;
2057 } else {
2058 stb(mem_end, 0x00);
2059 }
2060 while (mem_ref < mem_end) {
2061 if (val == 0)
2062 break;
2063 v = val % 100;
2064 val = val / 100;
2065 v = ((v / 10) << 4) | (v % 10);
2066 stb(mem_ref++, v);
2067 }
2068 while (mem_ref < mem_end) {
2069 stb(mem_ref++, 0);
2070 }
2071}
2072
2073void helper_f2xm1(void)
2074{
2075 ST0 = pow(2.0,ST0) - 1.0;
2076}
2077
2078void helper_fyl2x(void)
2079{
2080 CPU86_LDouble fptemp;
2081
2082 fptemp = ST0;
2083 if (fptemp>0.0){
2084 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
2085 ST1 *= fptemp;
2086 fpop();
2087 } else {
2088 env->fpus &= (~0x4700);
2089 env->fpus |= 0x400;
2090 }
2091}
2092
2093void helper_fptan(void)
2094{
2095 CPU86_LDouble fptemp;
2096
2097 fptemp = ST0;
2098 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
2099 env->fpus |= 0x400;
2100 } else {
2101 ST0 = tan(fptemp);
2102 fpush();
2103 ST0 = 1.0;
2104 env->fpus &= (~0x400); /* C2 <-- 0 */
2105 /* the above code is for |arg| < 2**52 only */
2106 }
2107}
2108
2109void helper_fpatan(void)
2110{
2111 CPU86_LDouble fptemp, fpsrcop;
2112
2113 fpsrcop = ST1;
2114 fptemp = ST0;
2115 ST1 = atan2(fpsrcop,fptemp);
2116 fpop();
2117}
2118
2119void helper_fxtract(void)
2120{
2121 CPU86_LDoubleU temp;
2122 unsigned int expdif;
2123
2124 temp.d = ST0;
2125 expdif = EXPD(temp) - EXPBIAS;
2126 /*DP exponent bias*/
2127 ST0 = expdif;
2128 fpush();
2129 BIASEXPONENT(temp);
2130 ST0 = temp.d;
2131}
2132
/* FPREM1: IEEE-style partial remainder, ST0 <- ST0 REM ST1.
   If the exponent difference is small enough (< 53) the reduction is
   completed in one step and the three low bits of the quotient are
   reported in C0/C1/C3 with C2 cleared; otherwise only a partial
   reduction (about 50 bits of exponent) is done and C2 is set so the
   caller re-executes the instruction. */
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    /* compare magnitudes through the raw (biased) exponent fields */
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        /* quotient fits in a double mantissa: finish the reduction.
           NOTE(review): the quotient is chopped toward zero here while
           FPREM1 specifies round-to-nearest -- confirm */
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400; /* C2 <-- 1 */
        /* strip roughly 50 bits off the exponent difference per pass */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
            floor(fpsrcop): ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
2165
/* FPREM: x87-style partial remainder, ST0 <- ST0 MOD ST1, with the
   quotient chopped toward zero (unlike FPREM1's round-to-nearest).
   Exponent difference < 53: full reduction, quotient bits reported in
   C0/C1/C3, C2 cleared.  Otherwise a partial ~50-bit reduction is done
   and C2 is set so the caller loops. */
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    /* compare magnitudes through the raw (biased) exponent fields */
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if ( expdif < 53 ) {
        /* quotient fits in a double mantissa: finish the reduction */
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq); /* chop toward 0 */
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400; /* C2 <-- 1 */
        /* strip roughly 50 bits off the exponent difference per pass */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0)?
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
2198
2199void helper_fyl2xp1(void)
2200{
2201 CPU86_LDouble fptemp;
2202
2203 fptemp = ST0;
2204 if ((fptemp+1.0)>0.0) {
2205 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
2206 ST1 *= fptemp;
2207 fpop();
2208 } else {
2209 env->fpus &= (~0x4700);
2210 env->fpus |= 0x400;
2211 }
2212}
2213
2214void helper_fsqrt(void)
2215{
2216 CPU86_LDouble fptemp;
2217
2218 fptemp = ST0;
2219 if (fptemp<0.0) {
2220 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
2221 env->fpus |= 0x400;
2222 }
2223 ST0 = sqrt(fptemp);
2224}
2225
2226void helper_fsincos(void)
2227{
2228 CPU86_LDouble fptemp;
2229
2230 fptemp = ST0;
2231 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
2232 env->fpus |= 0x400;
2233 } else {
2234 ST0 = sin(fptemp);
2235 fpush();
2236 ST0 = cos(fptemp);
2237 env->fpus &= (~0x400); /* C2 <-- 0 */
2238 /* the above code is for |arg| < 2**63 only */
2239 }
2240}
2241
/* FRNDINT: round ST0 to an integral value.
   On ARM the FPA round instructions are selected explicitly from the
   x87 rounding-control bits in FPUC; on other hosts rint() is used,
   which rounds according to the *host* FP rounding mode.
   NOTE(review): the generic path therefore assumes the host mode
   matches env->fpuc's RC field -- confirm. */
void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
#ifdef __arm__
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
#else
    a = rint(a);
#endif
    ST0 = a;
}
2268
2269void helper_fscale(void)
2270{
2271 CPU86_LDouble fpsrcop, fptemp;
2272
2273 fpsrcop = 2.0;
2274 fptemp = pow(fpsrcop,ST1);
2275 ST0 *= fptemp;
2276}
2277
2278void helper_fsin(void)
2279{
2280 CPU86_LDouble fptemp;
2281
2282 fptemp = ST0;
2283 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
2284 env->fpus |= 0x400;
2285 } else {
2286 ST0 = sin(fptemp);
2287 env->fpus &= (~0x400); /* C2 <-- 0 */
2288 /* the above code is for |arg| < 2**53 only */
2289 }
2290}
2291
2292void helper_fcos(void)
2293{
2294 CPU86_LDouble fptemp;
2295
2296 fptemp = ST0;
2297 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
2298 env->fpus |= 0x400;
2299 } else {
2300 ST0 = cos(fptemp);
2301 env->fpus &= (~0x400); /* C2 <-- 0 */
2302 /* the above code is for |arg5 < 2**63 only */
2303 }
2304}
2305
/* FXAM: classify ST0 and report the class through the condition-code
   bits of the status word.  Bit values used below:
   C0 = 0x100, C1 = 0x200, C2 = 0x400, C3 = 0x4000.
   C1 receives the sign; the class is encoded in C3/C2/C0. */
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* the raw (biased) exponent field selects the class */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        /* all-ones exponent: infinity or NaN depending on mantissa */
        if (MANTD(temp) == 0)
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        /* all-zero exponent: zero or denormal depending on mantissa */
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400; /* normal finite number: C2 <-- 1 */
    }
}
2332
2333void helper_fstenv(uint8_t *ptr, int data32)
2334{
2335 int fpus, fptag, exp, i;
2336 uint64_t mant;
2337 CPU86_LDoubleU tmp;
2338
2339 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
2340 fptag = 0;
2341 for (i=7; i>=0; i--) {
2342 fptag <<= 2;
2343 if (env->fptags[i]) {
2344 fptag |= 3;
2345 } else {
2346 tmp.d = env->fpregs[i];
2347 exp = EXPD(tmp);
2348 mant = MANTD(tmp);
2349 if (exp == 0 && mant == 0) {
2350 /* zero */
2351 fptag |= 1;
2352 } else if (exp == 0 || exp == MAXEXPD
2353#ifdef USE_X86LDOUBLE
2354 || (mant & (1LL << 63)) == 0
2355#endif
2356 ) {
2357 /* NaNs, infinity, denormal */
2358 fptag |= 2;
2359 }
2360 }
2361 }
2362 if (data32) {
2363 /* 32 bit */
2364 stl(ptr, env->fpuc);
2365 stl(ptr + 4, fpus);
2366 stl(ptr + 8, fptag);
2edcdce3
FB
2367 stl(ptr + 12, 0); /* fpip */
2368 stl(ptr + 16, 0); /* fpcs */
2369 stl(ptr + 20, 0); /* fpoo */
2370 stl(ptr + 24, 0); /* fpos */
2c0262af
FB
2371 } else {
2372 /* 16 bit */
2373 stw(ptr, env->fpuc);
2374 stw(ptr + 2, fpus);
2375 stw(ptr + 4, fptag);
2376 stw(ptr + 6, 0);
2377 stw(ptr + 8, 0);
2378 stw(ptr + 10, 0);
2379 stw(ptr + 12, 0);
2380 }
2381}
2382
2383void helper_fldenv(uint8_t *ptr, int data32)
2384{
2385 int i, fpus, fptag;
2386
2387 if (data32) {
2388 env->fpuc = lduw(ptr);
2389 fpus = lduw(ptr + 4);
2390 fptag = lduw(ptr + 8);
2391 }
2392 else {
2393 env->fpuc = lduw(ptr);
2394 fpus = lduw(ptr + 2);
2395 fptag = lduw(ptr + 4);
2396 }
2397 env->fpstt = (fpus >> 11) & 7;
2398 env->fpus = fpus & ~0x3800;
2edcdce3 2399 for(i = 0;i < 8; i++) {
2c0262af
FB
2400 env->fptags[i] = ((fptag & 3) == 3);
2401 fptag >>= 2;
2402 }
2403}
2404
2405void helper_fsave(uint8_t *ptr, int data32)
2406{
2407 CPU86_LDouble tmp;
2408 int i;
2409
2410 helper_fstenv(ptr, data32);
2411
2412 ptr += (14 << data32);
2413 for(i = 0;i < 8; i++) {
2414 tmp = ST(i);
2c0262af 2415 helper_fstt(tmp, ptr);
2c0262af
FB
2416 ptr += 10;
2417 }
2418
2419 /* fninit */
2420 env->fpus = 0;
2421 env->fpstt = 0;
2422 env->fpuc = 0x37f;
2423 env->fptags[0] = 1;
2424 env->fptags[1] = 1;
2425 env->fptags[2] = 1;
2426 env->fptags[3] = 1;
2427 env->fptags[4] = 1;
2428 env->fptags[5] = 1;
2429 env->fptags[6] = 1;
2430 env->fptags[7] = 1;
2431}
2432
2433void helper_frstor(uint8_t *ptr, int data32)
2434{
2435 CPU86_LDouble tmp;
2436 int i;
2437
2438 helper_fldenv(ptr, data32);
2439 ptr += (14 << data32);
2440
2441 for(i = 0;i < 8; i++) {
2c0262af 2442 tmp = helper_fldt(ptr);
2c0262af
FB
2443 ST(i) = tmp;
2444 ptr += 10;
2445 }
2446}
2447
61382a50
FB
2448#if !defined(CONFIG_USER_ONLY)
2449
2450#define MMUSUFFIX _mmu
2451#define GETPC() (__builtin_return_address(0))
2452
2c0262af
FB
2453#define SHIFT 0
2454#include "softmmu_template.h"
2455
2456#define SHIFT 1
2457#include "softmmu_template.h"
2458
2459#define SHIFT 2
2460#include "softmmu_template.h"
2461
2462#define SHIFT 3
2463#include "softmmu_template.h"
2464
61382a50
FB
2465#endif
2466
2467/* try to fill the TLB and return an exception if error. If retaddr is
2468 NULL, it means that the function was called in C code (i.e. not
2469 from generated code or from helper.c) */
2470/* XXX: fix it to restore all registers */
2471void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
2c0262af
FB
2472{
2473 TranslationBlock *tb;
2474 int ret;
2475 unsigned long pc;
61382a50
FB
2476 CPUX86State *saved_env;
2477
2478 /* XXX: hack to restore env in all cases, even if not called from
2479 generated code */
2480 saved_env = env;
2481 env = cpu_single_env;
61382a50
FB
2482
2483 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
2c0262af 2484 if (ret) {
61382a50
FB
2485 if (retaddr) {
2486 /* now we have a real cpu fault */
2487 pc = (unsigned long)retaddr;
2488 tb = tb_find_pc(pc);
2489 if (tb) {
2490 /* the PC is inside the translated code. It means that we have
2491 a virtual CPU fault */
58fe2f10 2492 cpu_restore_state(tb, env, pc, NULL);
61382a50 2493 }
2c0262af
FB
2494 }
2495 raise_exception_err(EXCP0E_PAGE, env->error_code);
2496 }
61382a50 2497 env = saved_env;
2c0262af 2498}