1 /*
2 * i386 micro operations
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "exec-i386.h"
21
22 /* NOTE: the data is not static, in order to force relocation generation by GCC */
23
24 uint8_t parity_table[256] = {
25 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
26 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
27 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
28 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
29 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
30 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
31 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
32 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
33 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
34 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
35 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
36 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
37 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
38 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
39 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
40 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
41 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
42 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
43 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
44 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
45 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
46 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
47 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
48 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
49 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
50 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
51 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
54 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
55 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
56 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
57 };
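/* PF reflects the parity of the low 8 bits of a result: parity_table[i]
   is CC_P exactly when the byte i contains an even number of set bits. */
#if 0
/* illustrative sketch only (never compiled): one way to regenerate the
   table above; assumes <stdio.h> for printf */
static void gen_parity_table(void)
{
    int i, b, n;
    for (i = 0; i < 256; i++) {
        for (n = 0, b = 0; b < 8; b++)
            n += (i >> b) & 1;
        printf("%s,%c", (n & 1) == 0 ? "CC_P" : "0", (i & 7) == 7 ? '\n' : ' ');
    }
}
#endif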
58
59 /* modulo 17 table */
60 const uint8_t rclw_table[32] = {
61 0, 1, 2, 3, 4, 5, 6, 7,
62 8, 9,10,11,12,13,14,15,
63 16, 0, 1, 2, 3, 4, 5, 6,
64 7, 8, 9,10,11,12,13,14,
65 };
66
67 /* modulo 9 table */
68 const uint8_t rclb_table[32] = {
69 0, 1, 2, 3, 4, 5, 6, 7,
70 8, 0, 1, 2, 3, 4, 5, 6,
71 7, 8, 0, 1, 2, 3, 4, 5,
72 6, 7, 8, 0, 1, 2, 3, 4,
73 };
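/* RCL rotates through CF, so a 16 bit operand effectively has 17 bit
   positions and an 8 bit operand has 9: the tables above reduce the
   rotation count modulo 17 (rclw) and modulo 9 (rclb). */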
74
75 #ifdef USE_X86LDOUBLE
76 /* an array of Intel 80-bit FP constants, to be loaded via integer ops */
77 typedef unsigned short f15ld[5];
78 const f15ld f15rk[] =
79 {
80 /*0*/ {0x0000,0x0000,0x0000,0x0000,0x0000},
81 /*1*/ {0x0000,0x0000,0x0000,0x8000,0x3fff},
82 /*pi*/ {0xc235,0x2168,0xdaa2,0xc90f,0x4000},
83 /*lg2*/ {0xf799,0xfbcf,0x9a84,0x9a20,0x3ffd},
84 /*ln2*/ {0x79ac,0xd1cf,0x17f7,0xb172,0x3ffe},
85 /*l2e*/ {0xf0bc,0x5c17,0x3b29,0xb8aa,0x3fff},
86 /*l2t*/ {0x8afe,0xcd1b,0x784b,0xd49a,0x4000}
87 };
88 #else
89 /* the same, 64-bit version */
90 typedef unsigned short f15ld[4];
91 const f15ld f15rk[] =
92 {
93 #ifndef WORDS_BIGENDIAN
94 /*0*/ {0x0000,0x0000,0x0000,0x0000},
95 /*1*/ {0x0000,0x0000,0x0000,0x3ff0},
96 /*pi*/ {0x2d18,0x5444,0x21fb,0x4009},
97 /*lg2*/ {0x79ff,0x509f,0x4413,0x3fd3},
98 /*ln2*/ {0x39ef,0xfefa,0x2e42,0x3fe6},
99 /*l2e*/ {0x82fe,0x652b,0x1547,0x3ff7},
100 /*l2t*/ {0xa371,0x0979,0x934f,0x400a}
101 #else
102 /*0*/ {0x0000,0x0000,0x0000,0x0000},
103 /*1*/ {0x3ff0,0x0000,0x0000,0x0000},
104 /*pi*/ {0x4009,0x21fb,0x5444,0x2d18},
105 /*lg2*/ {0x3fd3,0x4413,0x509f,0x79ff},
106 /*ln2*/ {0x3fe6,0x2e42,0xfefa,0x39ef},
107 /*l2e*/ {0x3ff7,0x1547,0x652b,0x82fe},
108 /*l2t*/ {0x400a,0x934f,0x0979,0xa371}
109 #endif
110 };
111 #endif
112
113 /* n must be a constant to be efficient */
114 static inline int lshift(int x, int n)
115 {
116 if (n >= 0)
117 return x << n;
118 else
119 return x >> (-n);
120 }
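/* lshift() shifts left for n >= 0 and right by -n otherwise, so a single
   signed constant can select the direction; with a constant n the branch
   is folded away and a single shift instruction remains. */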
121
122 /* we define the various pieces of code used by the JIT */
123
124 #define REG EAX
125 #define REGNAME _EAX
126 #include "opreg_template.h"
127 #undef REG
128 #undef REGNAME
129
130 #define REG ECX
131 #define REGNAME _ECX
132 #include "opreg_template.h"
133 #undef REG
134 #undef REGNAME
135
136 #define REG EDX
137 #define REGNAME _EDX
138 #include "opreg_template.h"
139 #undef REG
140 #undef REGNAME
141
142 #define REG EBX
143 #define REGNAME _EBX
144 #include "opreg_template.h"
145 #undef REG
146 #undef REGNAME
147
148 #define REG ESP
149 #define REGNAME _ESP
150 #include "opreg_template.h"
151 #undef REG
152 #undef REGNAME
153
154 #define REG EBP
155 #define REGNAME _EBP
156 #include "opreg_template.h"
157 #undef REG
158 #undef REGNAME
159
160 #define REG ESI
161 #define REGNAME _ESI
162 #include "opreg_template.h"
163 #undef REG
164 #undef REGNAME
165
166 #define REG EDI
167 #define REGNAME _EDI
168 #include "opreg_template.h"
169 #undef REG
170 #undef REGNAME
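/* each inclusion of opreg_template.h above expands, with REG/REGNAME
   pasted in, into the per-register move micro operations (transfers
   between the register and T0/T1/A0), e.g. op_movl_T0_EAX. */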
171
172 /* operations with flags */
173
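/* Condition codes are evaluated lazily: an arithmetic op only records its
   operands/result in CC_SRC/CC_DST and the translator tracks the operation
   kind in CC_OP; the actual EFLAGS bits are materialized on demand through
   cc_table[CC_OP].compute_all()/compute_c() (defined near the end of this
   file). */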
174 void OPPROTO op_addl_T0_T1_cc(void)
175 {
176 CC_SRC = T0;
177 T0 += T1;
178 CC_DST = T0;
179 }
180
181 void OPPROTO op_orl_T0_T1_cc(void)
182 {
183 T0 |= T1;
184 CC_DST = T0;
185 }
186
187 void OPPROTO op_andl_T0_T1_cc(void)
188 {
189 T0 &= T1;
190 CC_DST = T0;
191 }
192
193 void OPPROTO op_subl_T0_T1_cc(void)
194 {
195 CC_SRC = T0;
196 T0 -= T1;
197 CC_DST = T0;
198 }
199
200 void OPPROTO op_xorl_T0_T1_cc(void)
201 {
202 T0 ^= T1;
203 CC_DST = T0;
204 }
205
206 void OPPROTO op_cmpl_T0_T1_cc(void)
207 {
208 CC_SRC = T0;
209 CC_DST = T0 - T1;
210 }
211
212 void OPPROTO op_negl_T0_cc(void)
213 {
214 CC_SRC = 0;
215 T0 = -T0;
216 CC_DST = T0;
217 }
218
219 void OPPROTO op_incl_T0_cc(void)
220 {
221 CC_SRC = cc_table[CC_OP].compute_c();
222 T0++;
223 CC_DST = T0;
224 }
225
226 void OPPROTO op_decl_T0_cc(void)
227 {
228 CC_SRC = cc_table[CC_OP].compute_c();
229 T0--;
230 CC_DST = T0;
231 }
232
233 void OPPROTO op_testl_T0_T1_cc(void)
234 {
235 CC_DST = T0 & T1;
236 }
237
238 /* operations without flags */
239
240 void OPPROTO op_addl_T0_T1(void)
241 {
242 T0 += T1;
243 }
244
245 void OPPROTO op_orl_T0_T1(void)
246 {
247 T0 |= T1;
248 }
249
250 void OPPROTO op_andl_T0_T1(void)
251 {
252 T0 &= T1;
253 }
254
255 void OPPROTO op_subl_T0_T1(void)
256 {
257 T0 -= T1;
258 }
259
260 void OPPROTO op_xorl_T0_T1(void)
261 {
262 T0 ^= T1;
263 }
264
265 void OPPROTO op_negl_T0(void)
266 {
267 T0 = -T0;
268 }
269
270 void OPPROTO op_incl_T0(void)
271 {
272 T0++;
273 }
274
275 void OPPROTO op_decl_T0(void)
276 {
277 T0--;
278 }
279
280 void OPPROTO op_notl_T0(void)
281 {
282 T0 = ~T0;
283 }
284
285 void OPPROTO op_bswapl_T0(void)
286 {
287 T0 = bswap32(T0);
288 }
289
290 /* multiply/divide */
291 void OPPROTO op_mulb_AL_T0(void)
292 {
293 unsigned int res;
294 res = (uint8_t)EAX * (uint8_t)T0;
295 EAX = (EAX & 0xffff0000) | res;
296 CC_SRC = (res & 0xff00);
297 }
298
299 void OPPROTO op_imulb_AL_T0(void)
300 {
301 int res;
302 res = (int8_t)EAX * (int8_t)T0;
303 EAX = (EAX & 0xffff0000) | (res & 0xffff);
304 CC_SRC = (res != (int8_t)res);
305 }
306
307 void OPPROTO op_mulw_AX_T0(void)
308 {
309 unsigned int res;
310 res = (uint16_t)EAX * (uint16_t)T0;
311 EAX = (EAX & 0xffff0000) | (res & 0xffff);
312 EDX = (EDX & 0xffff0000) | ((res >> 16) & 0xffff);
313 CC_SRC = res >> 16;
314 }
315
316 void OPPROTO op_imulw_AX_T0(void)
317 {
318 int res;
319 res = (int16_t)EAX * (int16_t)T0;
320 EAX = (EAX & 0xffff0000) | (res & 0xffff);
321 EDX = (EDX & 0xffff0000) | ((res >> 16) & 0xffff);
322 CC_SRC = (res != (int16_t)res);
323 }
324
325 void OPPROTO op_mull_EAX_T0(void)
326 {
327 uint64_t res;
328 res = (uint64_t)((uint32_t)EAX) * (uint64_t)((uint32_t)T0);
329 EAX = res;
330 EDX = res >> 32;
331 CC_SRC = res >> 32;
332 }
333
334 void OPPROTO op_imull_EAX_T0(void)
335 {
336 int64_t res;
337 res = (int64_t)((int32_t)EAX) * (int64_t)((int32_t)T0);
338 EAX = res;
339 EDX = res >> 32;
340 CC_SRC = (res != (int32_t)res);
341 }
342
343 void OPPROTO op_imulw_T0_T1(void)
344 {
345 int res;
346 res = (int16_t)T0 * (int16_t)T1;
347 T0 = res;
348 CC_SRC = (res != (int16_t)res);
349 }
350
351 void OPPROTO op_imull_T0_T1(void)
352 {
353 int64_t res;
354 res = (int64_t)((int32_t)T0) * (int64_t)((int32_t)T1);
355 T0 = res;
356 CC_SRC = (res != (int32_t)res);
357 }
358
359 /* division, flags are undefined */
360 /* XXX: add exceptions for overflow */
361 void OPPROTO op_divb_AL_T0(void)
362 {
363 unsigned int num, den, q, r;
364
365 num = (EAX & 0xffff);
366 den = (T0 & 0xff);
367 if (den == 0) {
368 EIP = PARAM1;
369 raise_exception(EXCP00_DIVZ);
370 }
371 q = (num / den) & 0xff;
372 r = (num % den) & 0xff;
373 EAX = (EAX & 0xffff0000) | (r << 8) | q;
374 }
375
376 void OPPROTO op_idivb_AL_T0(void)
377 {
378 int num, den, q, r;
379
380 num = (int16_t)EAX;
381 den = (int8_t)T0;
382 if (den == 0) {
383 EIP = PARAM1;
384 raise_exception(EXCP00_DIVZ);
385 }
386 q = (num / den) & 0xff;
387 r = (num % den) & 0xff;
388 EAX = (EAX & 0xffff0000) | (r << 8) | q;
389 }
390
391 void OPPROTO op_divw_AX_T0(void)
392 {
393 unsigned int num, den, q, r;
394
395 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
396 den = (T0 & 0xffff);
397 if (den == 0) {
398 EIP = PARAM1;
399 raise_exception(EXCP00_DIVZ);
400 }
401 q = (num / den) & 0xffff;
402 r = (num % den) & 0xffff;
403 EAX = (EAX & 0xffff0000) | q;
404 EDX = (EDX & 0xffff0000) | r;
405 }
406
407 void OPPROTO op_idivw_AX_T0(void)
408 {
409 int num, den, q, r;
410
411 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
412 den = (int16_t)T0;
413 if (den == 0) {
414 EIP = PARAM1;
415 raise_exception(EXCP00_DIVZ);
416 }
417 q = (num / den) & 0xffff;
418 r = (num % den) & 0xffff;
419 EAX = (EAX & 0xffff0000) | q;
420 EDX = (EDX & 0xffff0000) | r;
421 }
422
423 #ifdef BUGGY_GCC_DIV64
424 /* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
425 call it from another function */
426 uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
427 {
428 *q_ptr = num / den;
429 return num % den;
430 }
431
432 int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
433 {
434 *q_ptr = num / den;
435 return num % den;
436 }
437 #endif
438
439 void OPPROTO op_divl_EAX_T0(void)
440 {
441 unsigned int den, q, r;
442 uint64_t num;
443
444 num = EAX | ((uint64_t)EDX << 32);
445 den = T0;
446 if (den == 0) {
447 EIP = PARAM1;
448 raise_exception(EXCP00_DIVZ);
449 }
450 #ifdef BUGGY_GCC_DIV64
451 r = div64(&q, num, den);
452 #else
453 q = (num / den);
454 r = (num % den);
455 #endif
456 EAX = q;
457 EDX = r;
458 }
459
460 void OPPROTO op_idivl_EAX_T0(void)
461 {
462 int den, q, r;
463 int64_t num;
464
465 num = EAX | ((uint64_t)EDX << 32);
466 den = T0;
467 if (den == 0) {
468 EIP = PARAM1;
469 raise_exception(EXCP00_DIVZ);
470 }
471 #ifdef BUGGY_GCC_DIV64
472 r = idiv64(&q, num, den);
473 #else
474 q = (num / den);
475 r = (num % den);
476 #endif
477 EAX = q;
478 EDX = r;
479 }
480
481 /* constant load & misc op */
482
483 void OPPROTO op_movl_T0_im(void)
484 {
485 T0 = PARAM1;
486 }
487
488 void OPPROTO op_addl_T0_im(void)
489 {
490 T0 += PARAM1;
491 }
492
493 void OPPROTO op_andl_T0_ffff(void)
494 {
495 T0 = T0 & 0xffff;
496 }
497
498 void OPPROTO op_movl_T0_T1(void)
499 {
500 T0 = T1;
501 }
502
503 void OPPROTO op_movl_T1_im(void)
504 {
505 T1 = PARAM1;
506 }
507
508 void OPPROTO op_addl_T1_im(void)
509 {
510 T1 += PARAM1;
511 }
512
513 void OPPROTO op_movl_T1_A0(void)
514 {
515 T1 = A0;
516 }
517
518 void OPPROTO op_movl_A0_im(void)
519 {
520 A0 = PARAM1;
521 }
522
523 void OPPROTO op_addl_A0_im(void)
524 {
525 A0 += PARAM1;
526 }
527
528 void OPPROTO op_addl_A0_AL(void)
529 {
530 A0 += (EAX & 0xff);
531 }
532
533 void OPPROTO op_andl_A0_ffff(void)
534 {
535 A0 = A0 & 0xffff;
536 }
537
538 /* memory access */
539
540 void OPPROTO op_ldub_T0_A0(void)
541 {
542 T0 = ldub((uint8_t *)A0);
543 }
544
545 void OPPROTO op_ldsb_T0_A0(void)
546 {
547 T0 = ldsb((int8_t *)A0);
548 }
549
550 void OPPROTO op_lduw_T0_A0(void)
551 {
552 T0 = lduw((uint8_t *)A0);
553 }
554
555 void OPPROTO op_ldsw_T0_A0(void)
556 {
557 T0 = ldsw((int8_t *)A0);
558 }
559
560 void OPPROTO op_ldl_T0_A0(void)
561 {
562 T0 = ldl((uint8_t *)A0);
563 }
564
565 void OPPROTO op_ldub_T1_A0(void)
566 {
567 T1 = ldub((uint8_t *)A0);
568 }
569
570 void OPPROTO op_ldsb_T1_A0(void)
571 {
572 T1 = ldsb((int8_t *)A0);
573 }
574
575 void OPPROTO op_lduw_T1_A0(void)
576 {
577 T1 = lduw((uint8_t *)A0);
578 }
579
580 void OPPROTO op_ldsw_T1_A0(void)
581 {
582 T1 = ldsw((int8_t *)A0);
583 }
584
585 void OPPROTO op_ldl_T1_A0(void)
586 {
587 T1 = ldl((uint8_t *)A0);
588 }
589
590 void OPPROTO op_stb_T0_A0(void)
591 {
592 stb((uint8_t *)A0, T0);
593 }
594
595 void OPPROTO op_stw_T0_A0(void)
596 {
597 stw((uint8_t *)A0, T0);
598 }
599
600 void OPPROTO op_stl_T0_A0(void)
601 {
602 stl((uint8_t *)A0, T0);
603 }
604
605 /* used for bit operations */
606
607 void OPPROTO op_add_bitw_A0_T1(void)
608 {
609 A0 += ((int32_t)T1 >> 4) << 1;
610 }
611
612 void OPPROTO op_add_bitl_A0_T1(void)
613 {
614 A0 += ((int32_t)T1 >> 5) << 2;
615 }
616
617 /* indirect jump */
618
619 void OPPROTO op_jmp_T0(void)
620 {
621 EIP = T0;
622 }
623
624 void OPPROTO op_jmp_im(void)
625 {
626 EIP = PARAM1;
627 }
628
629 #if 0
630 /* full interrupt support (only useful for real CPU emulation, not
631 finished) - I won't do it any time soon, finish it if you want ! */
632 void raise_interrupt(int intno, int is_int, int error_code,
633 unsigned int next_eip)
634 {
635 SegmentDescriptorTable *dt;
636 uint8_t *ptr;
637 int type, dpl, cpl;
638 uint32_t e1, e2;
639
640 dt = &env->idt;
641 if (intno * 8 + 7 > dt->limit)
642 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
643 ptr = dt->base + intno * 8;
644 e1 = ldl(ptr);
645 e2 = ldl(ptr + 4);
646 /* check gate type */
647 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
648 switch(type) {
649 case 5: /* task gate */
650 case 6: /* 286 interrupt gate */
651 case 7: /* 286 trap gate */
652 case 14: /* 386 interrupt gate */
653 case 15: /* 386 trap gate */
654 break;
655 default:
656 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
657 break;
658 }
659 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
660 cpl = env->segs[R_CS] & 3;
661         /* check privilege if software int */
662 if (is_int && dpl < cpl)
663 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
664 /* check valid bit */
665 if (!(e2 & DESC_P_MASK))
666 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
667 }
668
669 #else
670
671 /*
672 * is_int is TRUE if coming from the int instruction. next_eip is the
673 * EIP value AFTER the interrupt instruction. It is only relevant if
674 * is_int is TRUE.
675 */
676 void raise_interrupt(int intno, int is_int, int error_code,
677 unsigned int next_eip)
678 {
679 SegmentDescriptorTable *dt;
680 uint8_t *ptr;
681 int dpl, cpl;
682 uint32_t e2;
683
684 dt = &env->idt;
685 ptr = dt->base + (intno * 8);
686 e2 = ldl(ptr + 4);
687
688 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
689 cpl = 3;
690     /* check privilege if software int */
691 if (is_int && dpl < cpl)
692 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
693
694     /* Since we emulate only user space, we cannot do more than
695        exit the emulation with the appropriate exception and error
696        code */
697 if (is_int)
698 EIP = next_eip;
699 env->exception_index = intno;
700 env->error_code = error_code;
701
702 cpu_loop_exit();
703 }
704
705 #endif
706
707 /* shortcuts to generate exceptions */
708 void raise_exception_err(int exception_index, int error_code)
709 {
710 raise_interrupt(exception_index, 0, error_code, 0);
711 }
712
713 void raise_exception(int exception_index)
714 {
715 raise_interrupt(exception_index, 0, 0, 0);
716 }
717
718 void OPPROTO op_raise_interrupt(void)
719 {
720 int intno;
721 unsigned int next_eip;
722 intno = PARAM1;
723 next_eip = PARAM2;
724 raise_interrupt(intno, 1, 0, next_eip);
725 }
726
727 void OPPROTO op_raise_exception(void)
728 {
729 int exception_index;
730 exception_index = PARAM1;
731 raise_exception(exception_index);
732 }
733
734 void OPPROTO op_into(void)
735 {
736 int eflags;
737 eflags = cc_table[CC_OP].compute_all();
738 if (eflags & CC_O) {
739 raise_interrupt(EXCP04_INTO, 1, 0, PARAM1);
740 }
741 FORCE_RET();
742 }
743
744 void OPPROTO op_cli(void)
745 {
746 env->eflags &= ~IF_MASK;
747 }
748
749 void OPPROTO op_sti(void)
750 {
751 env->eflags |= IF_MASK;
752 }
753
754 #if 0
755 /* vm86plus instructions */
756 void OPPROTO op_cli_vm(void)
757 {
758 env->eflags &= ~VIF_MASK;
759 }
760
761 void OPPROTO op_sti_vm(void)
762 {
763 env->eflags |= VIF_MASK;
764 if (env->eflags & VIP_MASK) {
765 EIP = PARAM1;
766 raise_exception(EXCP0D_GPF);
767 }
768 FORCE_RET();
769 }
770 #endif
771
772 void OPPROTO op_boundw(void)
773 {
774 int low, high, v;
775 low = ldsw((uint8_t *)A0);
776 high = ldsw((uint8_t *)A0 + 2);
777 v = (int16_t)T0;
778 if (v < low || v > high) {
779 EIP = PARAM1;
780 raise_exception(EXCP05_BOUND);
781 }
782 FORCE_RET();
783 }
784
785 void OPPROTO op_boundl(void)
786 {
787 int low, high, v;
788 low = ldl((uint8_t *)A0);
789 high = ldl((uint8_t *)A0 + 4);
790 v = T0;
791 if (v < low || v > high) {
792 EIP = PARAM1;
793 raise_exception(EXCP05_BOUND);
794 }
795 FORCE_RET();
796 }
797
798 void OPPROTO op_cmpxchg8b(void)
799 {
800 uint64_t d;
801 int eflags;
802
803 eflags = cc_table[CC_OP].compute_all();
804 d = ldq((uint8_t *)A0);
805 if (d == (((uint64_t)EDX << 32) | EAX)) {
806 stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
807 eflags |= CC_Z;
808 } else {
809 EDX = d >> 32;
810 EAX = d;
811 eflags &= ~CC_Z;
812 }
813 CC_SRC = eflags;
814 FORCE_RET();
815 }
816
817 #if defined(__powerpc__)
818
819 /* on PowerPC we patch the jump instruction directly */
820 #define JUMP_TB(tbparam, n, eip)\
821 do {\
822 static void __attribute__((unused)) *__op_label ## n = &&label ## n;\
823 asm volatile ("b %0" : : "i" (&__op_jmp ## n));\
824 label ## n:\
825 T0 = (long)(tbparam) + (n);\
826 EIP = eip;\
827 } while (0)
828
829 #else
830
831 /* jump to next block operations (more portable code, does not need
832 cache flushing, but slower because of indirect jump) */
833 #define JUMP_TB(tbparam, n, eip)\
834 do {\
835 static void __attribute__((unused)) *__op_label ## n = &&label ## n;\
836 goto *((TranslationBlock *)tbparam)->tb_next[n];\
837 label ## n:\
838 T0 = (long)(tbparam) + (n);\
839 EIP = eip;\
840 } while (0)
841
842 #endif
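/* JUMP_TB implements direct block chaining: once the target translation
   block is known, tb_next[n] (or, on PowerPC, the branch instruction
   itself) is patched so execution flows straight into the next block
   without returning to the main loop; until then T0 returns
   (tbparam + n) so the caller can tell which of the two outgoing
   branches was taken and link it. */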
843
844 void OPPROTO op_jmp_tb_next(void)
845 {
846 JUMP_TB(PARAM1, 0, PARAM2);
847 }
848
849 void OPPROTO op_movl_T0_0(void)
850 {
851 T0 = 0;
852 }
853
854 /* multiple size ops */
855
856 #define ldul ldl
857
858 #define SHIFT 0
859 #include "ops_template.h"
860 #undef SHIFT
861
862 #define SHIFT 1
863 #include "ops_template.h"
864 #undef SHIFT
865
866 #define SHIFT 2
867 #include "ops_template.h"
868 #undef SHIFT
869
870 /* sign extend */
871
872 void OPPROTO op_movsbl_T0_T0(void)
873 {
874 T0 = (int8_t)T0;
875 }
876
877 void OPPROTO op_movzbl_T0_T0(void)
878 {
879 T0 = (uint8_t)T0;
880 }
881
882 void OPPROTO op_movswl_T0_T0(void)
883 {
884 T0 = (int16_t)T0;
885 }
886
887 void OPPROTO op_movzwl_T0_T0(void)
888 {
889 T0 = (uint16_t)T0;
890 }
891
892 void OPPROTO op_movswl_EAX_AX(void)
893 {
894 EAX = (int16_t)EAX;
895 }
896
897 void OPPROTO op_movsbw_AX_AL(void)
898 {
899 EAX = (EAX & 0xffff0000) | ((int8_t)EAX & 0xffff);
900 }
901
902 void OPPROTO op_movslq_EDX_EAX(void)
903 {
904 EDX = (int32_t)EAX >> 31;
905 }
906
907 void OPPROTO op_movswl_DX_AX(void)
908 {
909 EDX = (EDX & 0xffff0000) | (((int16_t)EAX >> 15) & 0xffff);
910 }
911
912 /* push/pop */
913
914 void op_pushl_T0(void)
915 {
916 uint32_t offset;
917 offset = ESP - 4;
918 stl((void *)offset, T0);
919 /* modify ESP after to handle exceptions correctly */
920 ESP = offset;
921 }
922
923 void op_pushw_T0(void)
924 {
925 uint32_t offset;
926 offset = ESP - 2;
927 stw((void *)offset, T0);
928 /* modify ESP after to handle exceptions correctly */
929 ESP = offset;
930 }
931
932 void op_pushl_ss32_T0(void)
933 {
934 uint32_t offset;
935 offset = ESP - 4;
936 stl(env->seg_cache[R_SS].base + offset, T0);
937 /* modify ESP after to handle exceptions correctly */
938 ESP = offset;
939 }
940
941 void op_pushw_ss32_T0(void)
942 {
943 uint32_t offset;
944 offset = ESP - 2;
945 stw(env->seg_cache[R_SS].base + offset, T0);
946 /* modify ESP after to handle exceptions correctly */
947 ESP = offset;
948 }
949
950 void op_pushl_ss16_T0(void)
951 {
952 uint32_t offset;
953 offset = (ESP - 4) & 0xffff;
954 stl(env->seg_cache[R_SS].base + offset, T0);
955 /* modify ESP after to handle exceptions correctly */
956 ESP = (ESP & ~0xffff) | offset;
957 }
958
959 void op_pushw_ss16_T0(void)
960 {
961 uint32_t offset;
962 offset = (ESP - 2) & 0xffff;
963 stw(env->seg_cache[R_SS].base + offset, T0);
964 /* modify ESP after to handle exceptions correctly */
965 ESP = (ESP & ~0xffff) | offset;
966 }
967
968 /* NOTE: ESP update is done after */
969 void op_popl_T0(void)
970 {
971 T0 = ldl((void *)ESP);
972 }
973
974 void op_popw_T0(void)
975 {
976 T0 = lduw((void *)ESP);
977 }
978
979 void op_popl_ss32_T0(void)
980 {
981 T0 = ldl(env->seg_cache[R_SS].base + ESP);
982 }
983
984 void op_popw_ss32_T0(void)
985 {
986 T0 = lduw(env->seg_cache[R_SS].base + ESP);
987 }
988
989 void op_popl_ss16_T0(void)
990 {
991 T0 = ldl(env->seg_cache[R_SS].base + (ESP & 0xffff));
992 }
993
994 void op_popw_ss16_T0(void)
995 {
996 T0 = lduw(env->seg_cache[R_SS].base + (ESP & 0xffff));
997 }
998
999 void op_addl_ESP_4(void)
1000 {
1001 ESP += 4;
1002 }
1003
1004 void op_addl_ESP_2(void)
1005 {
1006 ESP += 2;
1007 }
1008
1009 void op_addw_ESP_4(void)
1010 {
1011 ESP = (ESP & ~0xffff) | ((ESP + 4) & 0xffff);
1012 }
1013
1014 void op_addw_ESP_2(void)
1015 {
1016 ESP = (ESP & ~0xffff) | ((ESP + 2) & 0xffff);
1017 }
1018
1019 void op_addl_ESP_im(void)
1020 {
1021 ESP += PARAM1;
1022 }
1023
1024 void op_addw_ESP_im(void)
1025 {
1026 ESP = (ESP & ~0xffff) | ((ESP + PARAM1) & 0xffff);
1027 }
1028
1029 /* rdtsc */
1030 #ifndef __i386__
1031 uint64_t emu_time;
1032 #endif
1033
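/* on an x86 host the time stamp counter is read directly; the "=A"
   constraint asks GCC for the 64 bit result in the EDX:EAX pair. On
   other hosts a simple incrementing counter is used instead. */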
1034 void OPPROTO op_rdtsc(void)
1035 {
1036 uint64_t val;
1037 #ifdef __i386__
1038 asm("rdtsc" : "=A" (val));
1039 #else
1040 /* better than nothing: the time increases */
1041 val = emu_time++;
1042 #endif
1043 EAX = val;
1044 EDX = val >> 32;
1045 }
1046
1047 /* We simulate a pre-MMX Pentium, as Valgrind does */
1048 #define CPUID_FP87 (1 << 0)
1049 #define CPUID_VME (1 << 1)
1050 #define CPUID_DE (1 << 2)
1051 #define CPUID_PSE (1 << 3)
1052 #define CPUID_TSC (1 << 4)
1053 #define CPUID_MSR (1 << 5)
1054 #define CPUID_PAE (1 << 6)
1055 #define CPUID_MCE (1 << 7)
1056 #define CPUID_CX8 (1 << 8)
1057 #define CPUID_APIC (1 << 9)
1058 #define CPUID_SEP (1 << 11) /* sysenter/sysexit */
1059 #define CPUID_MTRR (1 << 12)
1060 #define CPUID_PGE (1 << 13)
1061 #define CPUID_MCA (1 << 14)
1062 #define CPUID_CMOV (1 << 15)
1063 /* ... */
1064 #define CPUID_MMX (1 << 23)
1065 #define CPUID_FXSR (1 << 24)
1066 #define CPUID_SSE (1 << 25)
1067 #define CPUID_SSE2 (1 << 26)
1068
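/* leaf 0 returns the vendor string "GenuineIntel" in EBX/EDX/ECX (little
   endian); leaf 1 returns the signature 0x52b, i.e. family 5 (Pentium),
   model 2, stepping 11, plus the feature bits set below. */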
1069 void helper_cpuid(void)
1070 {
1071 if (EAX == 0) {
1072 EAX = 1; /* max EAX index supported */
1073 EBX = 0x756e6547;
1074 ECX = 0x6c65746e;
1075 EDX = 0x49656e69;
1076 } else if (EAX == 1) {
1077 /* EAX = 1 info */
1078 EAX = 0x52b;
1079 EBX = 0;
1080 ECX = 0;
1081 EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
1082 CPUID_TSC | CPUID_MSR | CPUID_MCE |
1083 CPUID_CX8;
1084 }
1085 }
1086
1087 void OPPROTO op_cpuid(void)
1088 {
1089 helper_cpuid();
1090 }
1091
1092 /* bcd */
1093
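/* AAM divides AL by the immediate base (normally 10), putting the quotient
   in AH and the remainder in AL: e.g. AL = 42 with base 10 gives AH = 4,
   AL = 2. AAD is the inverse, AL = AH * base + AL with AH cleared. */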
1094 /* XXX: exception */
1095 void OPPROTO op_aam(void)
1096 {
1097 int base = PARAM1;
1098 int al, ah;
1099 al = EAX & 0xff;
1100 ah = al / base;
1101 al = al % base;
1102 EAX = (EAX & ~0xffff) | al | (ah << 8);
1103 CC_DST = al;
1104 }
1105
1106 void OPPROTO op_aad(void)
1107 {
1108 int base = PARAM1;
1109 int al, ah;
1110 al = EAX & 0xff;
1111 ah = (EAX >> 8) & 0xff;
1112 al = ((ah * base) + al) & 0xff;
1113 EAX = (EAX & ~0xffff) | al;
1114 CC_DST = al;
1115 }
1116
1117 void OPPROTO op_aaa(void)
1118 {
1119 int icarry;
1120 int al, ah, af;
1121 int eflags;
1122
1123 eflags = cc_table[CC_OP].compute_all();
1124 af = eflags & CC_A;
1125 al = EAX & 0xff;
1126 ah = (EAX >> 8) & 0xff;
1127
1128 icarry = (al > 0xf9);
1129 if (((al & 0x0f) > 9 ) || af) {
1130 al = (al + 6) & 0x0f;
1131 ah = (ah + 1 + icarry) & 0xff;
1132 eflags |= CC_C | CC_A;
1133 } else {
1134 eflags &= ~(CC_C | CC_A);
1135 al &= 0x0f;
1136 }
1137 EAX = (EAX & ~0xffff) | al | (ah << 8);
1138 CC_SRC = eflags;
1139 }
1140
1141 void OPPROTO op_aas(void)
1142 {
1143 int icarry;
1144 int al, ah, af;
1145 int eflags;
1146
1147 eflags = cc_table[CC_OP].compute_all();
1148 af = eflags & CC_A;
1149 al = EAX & 0xff;
1150 ah = (EAX >> 8) & 0xff;
1151
1152 icarry = (al < 6);
1153 if (((al & 0x0f) > 9 ) || af) {
1154 al = (al - 6) & 0x0f;
1155 ah = (ah - 1 - icarry) & 0xff;
1156 eflags |= CC_C | CC_A;
1157 } else {
1158 eflags &= ~(CC_C | CC_A);
1159 al &= 0x0f;
1160 }
1161 EAX = (EAX & ~0xffff) | al | (ah << 8);
1162 CC_SRC = eflags;
1163 }
1164
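/* DAA corrects AL after a packed BCD addition: e.g. 0x15 + 0x27 gives the
   binary result 0x3c, which DAA adjusts to 0x42 (with AF set). DAS below
   does the same for subtraction. */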
1165 void OPPROTO op_daa(void)
1166 {
1167 int al, af, cf;
1168 int eflags;
1169
1170 eflags = cc_table[CC_OP].compute_all();
1171 cf = eflags & CC_C;
1172 af = eflags & CC_A;
1173 al = EAX & 0xff;
1174
1175 eflags = 0;
1176 if (((al & 0x0f) > 9 ) || af) {
1177 al = (al + 6) & 0xff;
1178 eflags |= CC_A;
1179 }
1180 if ((al > 0x9f) || cf) {
1181 al = (al + 0x60) & 0xff;
1182 eflags |= CC_C;
1183 }
1184 EAX = (EAX & ~0xff) | al;
1185 /* well, speed is not an issue here, so we compute the flags by hand */
1186 eflags |= (al == 0) << 6; /* zf */
1187 eflags |= parity_table[al]; /* pf */
1188 eflags |= (al & 0x80); /* sf */
1189 CC_SRC = eflags;
1190 }
1191
1192 void OPPROTO op_das(void)
1193 {
1194 int al, al1, af, cf;
1195 int eflags;
1196
1197 eflags = cc_table[CC_OP].compute_all();
1198 cf = eflags & CC_C;
1199 af = eflags & CC_A;
1200 al = EAX & 0xff;
1201
1202 eflags = 0;
1203 al1 = al;
1204 if (((al & 0x0f) > 9 ) || af) {
1205 eflags |= CC_A;
1206 if (al < 6 || cf)
1207 eflags |= CC_C;
1208 al = (al - 6) & 0xff;
1209 }
1210 if ((al1 > 0x99) || cf) {
1211 al = (al - 0x60) & 0xff;
1212 eflags |= CC_C;
1213 }
1214 EAX = (EAX & ~0xff) | al;
1215 /* well, speed is not an issue here, so we compute the flags by hand */
1216 eflags |= (al == 0) << 6; /* zf */
1217 eflags |= parity_table[al]; /* pf */
1218 eflags |= (al & 0x80); /* sf */
1219 CC_SRC = eflags;
1220 }
1221
1222 /* segment handling */
1223
1224 /* only works if protected mode and not VM86 */
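/* descriptor decoding: the base is assembled from e1[31:16], e2[7:0] and
   e2[31:24]; the limit comes from e1[15:0] and e2[19:16] and is scaled to
   4K granularity (<< 12 | 0xfff) when the G bit (e2 bit 23) is set;
   seg_32bit mirrors the D/B bit (e2 bit 22). */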
1225 void load_seg(int seg_reg, int selector, unsigned cur_eip)
1226 {
1227 SegmentCache *sc;
1228 SegmentDescriptorTable *dt;
1229 int index;
1230 uint32_t e1, e2;
1231 uint8_t *ptr;
1232
1233 sc = &env->seg_cache[seg_reg];
1234 if ((selector & 0xfffc) == 0) {
1235 /* null selector case */
1236 if (seg_reg == R_SS) {
1237 EIP = cur_eip;
1238 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1239 } else {
1240 /* XXX: each access should trigger an exception */
1241 sc->base = NULL;
1242 sc->limit = 0;
1243 sc->seg_32bit = 1;
1244 }
1245 } else {
1246 if (selector & 0x4)
1247 dt = &env->ldt;
1248 else
1249 dt = &env->gdt;
1250 index = selector & ~7;
1251 if ((index + 7) > dt->limit) {
1252 EIP = cur_eip;
1253 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1254 }
1255 ptr = dt->base + index;
1256 e1 = ldl(ptr);
1257 e2 = ldl(ptr + 4);
1258 if (!(e2 & DESC_S_MASK) ||
1259 (e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1260 EIP = cur_eip;
1261 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1262 }
1263
1264 if (seg_reg == R_SS) {
1265 if ((e2 & (DESC_CS_MASK | DESC_W_MASK)) == 0) {
1266 EIP = cur_eip;
1267 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1268 }
1269 } else {
1270 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1271 EIP = cur_eip;
1272 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1273 }
1274 }
1275
1276 if (!(e2 & DESC_P_MASK)) {
1277 EIP = cur_eip;
1278 if (seg_reg == R_SS)
1279 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
1280 else
1281 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1282 }
1283
1284 sc->base = (void *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1285 sc->limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1286 if (e2 & (1 << 23))
1287 sc->limit = (sc->limit << 12) | 0xfff;
1288 sc->seg_32bit = (e2 >> 22) & 1;
1289 #if 0
1290 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx seg_32bit=%d\n",
1291 selector, (unsigned long)sc->base, sc->limit, sc->seg_32bit);
1292 #endif
1293 }
1294 env->segs[seg_reg] = selector;
1295 }
1296
1297 void OPPROTO op_movl_seg_T0(void)
1298 {
1299 load_seg(PARAM1, T0 & 0xffff, PARAM2);
1300 }
1301
1302 /* faster VM86 version */
1303 void OPPROTO op_movl_seg_T0_vm(void)
1304 {
1305 int selector;
1306
1307 selector = T0 & 0xffff;
1308 /* env->segs[] access */
1309 *(uint32_t *)((char *)env + PARAM1) = selector;
1310 /* env->seg_cache[] access */
1311 ((SegmentCache *)((char *)env + PARAM2))->base = (void *)(selector << 4);
1312 }
1313
1314 void OPPROTO op_movl_T0_seg(void)
1315 {
1316 T0 = env->segs[PARAM1];
1317 }
1318
1319 void OPPROTO op_movl_A0_seg(void)
1320 {
1321 A0 = *(unsigned long *)((char *)env + PARAM1);
1322 }
1323
1324 void OPPROTO op_addl_A0_seg(void)
1325 {
1326 A0 += *(unsigned long *)((char *)env + PARAM1);
1327 }
1328
1329 void helper_lsl(void)
1330 {
1331 unsigned int selector, limit;
1332 SegmentDescriptorTable *dt;
1333 int index;
1334 uint32_t e1, e2;
1335 uint8_t *ptr;
1336
1337 CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1338 selector = T0 & 0xffff;
1339 if (selector & 0x4)
1340 dt = &env->ldt;
1341 else
1342 dt = &env->gdt;
1343 index = selector & ~7;
1344 if ((index + 7) > dt->limit)
1345 return;
1346 ptr = dt->base + index;
1347 e1 = ldl(ptr);
1348 e2 = ldl(ptr + 4);
1349 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1350 if (e2 & (1 << 23))
1351 limit = (limit << 12) | 0xfff;
1352 T1 = limit;
1353 CC_SRC |= CC_Z;
1354 }
1355
1356 void OPPROTO op_lsl(void)
1357 {
1358 helper_lsl();
1359 }
1360
1361 void helper_lar(void)
1362 {
1363 unsigned int selector;
1364 SegmentDescriptorTable *dt;
1365 int index;
1366 uint32_t e2;
1367 uint8_t *ptr;
1368
1369 CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1370 selector = T0 & 0xffff;
1371 if (selector & 0x4)
1372 dt = &env->ldt;
1373 else
1374 dt = &env->gdt;
1375 index = selector & ~7;
1376 if ((index + 7) > dt->limit)
1377 return;
1378 ptr = dt->base + index;
1379 e2 = ldl(ptr + 4);
1380 T1 = e2 & 0x00f0ff00;
1381 CC_SRC |= CC_Z;
1382 }
1383
1384 void OPPROTO op_lar(void)
1385 {
1386 helper_lar();
1387 }
1388
1389 /* flags handling */
1390
1391 /* slow jump cases: in order to avoid calling a function through a
1392    pointer (which can generate a stack frame on PowerPC), we use
1393    op_setcc to set T0 and then call op_jcc. */
1394 void OPPROTO op_jcc(void)
1395 {
1396 if (T0)
1397 JUMP_TB(PARAM1, 0, PARAM2);
1398 else
1399 JUMP_TB(PARAM1, 1, PARAM3);
1400 FORCE_RET();
1401 }
1402
1403 /* slow set cases (compute x86 flags) */
1404 void OPPROTO op_seto_T0_cc(void)
1405 {
1406 int eflags;
1407 eflags = cc_table[CC_OP].compute_all();
1408 T0 = (eflags >> 11) & 1;
1409 }
1410
1411 void OPPROTO op_setb_T0_cc(void)
1412 {
1413 T0 = cc_table[CC_OP].compute_c();
1414 }
1415
1416 void OPPROTO op_setz_T0_cc(void)
1417 {
1418 int eflags;
1419 eflags = cc_table[CC_OP].compute_all();
1420 T0 = (eflags >> 6) & 1;
1421 }
1422
1423 void OPPROTO op_setbe_T0_cc(void)
1424 {
1425 int eflags;
1426 eflags = cc_table[CC_OP].compute_all();
1427 T0 = (eflags & (CC_Z | CC_C)) != 0;
1428 }
1429
1430 void OPPROTO op_sets_T0_cc(void)
1431 {
1432 int eflags;
1433 eflags = cc_table[CC_OP].compute_all();
1434 T0 = (eflags >> 7) & 1;
1435 }
1436
1437 void OPPROTO op_setp_T0_cc(void)
1438 {
1439 int eflags;
1440 eflags = cc_table[CC_OP].compute_all();
1441 T0 = (eflags >> 2) & 1;
1442 }
1443
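/* the signed conditions need SF ^ OF; since OF is bit 11 and SF is bit 7
   of EFLAGS, (eflags ^ (eflags >> 4)) >> 7 extracts SF ^ OF without any
   branch. */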
1444 void OPPROTO op_setl_T0_cc(void)
1445 {
1446 int eflags;
1447 eflags = cc_table[CC_OP].compute_all();
1448 T0 = ((eflags ^ (eflags >> 4)) >> 7) & 1;
1449 }
1450
1451 void OPPROTO op_setle_T0_cc(void)
1452 {
1453 int eflags;
1454 eflags = cc_table[CC_OP].compute_all();
1455 T0 = (((eflags ^ (eflags >> 4)) & 0x80) || (eflags & CC_Z)) != 0;
1456 }
1457
1458 void OPPROTO op_xor_T0_1(void)
1459 {
1460 T0 ^= 1;
1461 }
1462
1463 void OPPROTO op_set_cc_op(void)
1464 {
1465 CC_OP = PARAM1;
1466 }
1467
1468 #define FL_UPDATE_MASK32 (TF_MASK | AC_MASK | ID_MASK)
1469 #define FL_UPDATE_MASK16 (TF_MASK)
1470
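/* DF is stored as +1/-1 (the increment used by the string operations)
   rather than as a flag bit: bit 10 of the guest value is mapped to -1/+1
   here, and since -1 has all bits set, op_movl_T0_eflags can recover the
   bit with DF & DF_MASK. */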
1471 void OPPROTO op_movl_eflags_T0(void)
1472 {
1473 int eflags;
1474 eflags = T0;
1475 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1476 DF = 1 - (2 * ((eflags >> 10) & 1));
1477 /* we also update some system flags as in user mode */
1478 env->eflags = (env->eflags & ~FL_UPDATE_MASK32) | (eflags & FL_UPDATE_MASK32);
1479 }
1480
1481 void OPPROTO op_movw_eflags_T0(void)
1482 {
1483 int eflags;
1484 eflags = T0;
1485 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1486 DF = 1 - (2 * ((eflags >> 10) & 1));
1487 /* we also update some system flags as in user mode */
1488 env->eflags = (env->eflags & ~FL_UPDATE_MASK16) | (eflags & FL_UPDATE_MASK16);
1489 }
1490
1491 #if 0
1492 /* vm86plus version */
1493 void OPPROTO op_movw_eflags_T0_vm(void)
1494 {
1495 int eflags;
1496 eflags = T0;
1497 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1498 DF = 1 - (2 * ((eflags >> 10) & 1));
1499 /* we also update some system flags as in user mode */
1500 env->eflags = (env->eflags & ~(FL_UPDATE_MASK16 | VIF_MASK)) |
1501 (eflags & FL_UPDATE_MASK16);
1502 if (eflags & IF_MASK) {
1503 env->eflags |= VIF_MASK;
1504 if (env->eflags & VIP_MASK) {
1505 EIP = PARAM1;
1506 raise_exception(EXCP0D_GPF);
1507 }
1508 }
1509 FORCE_RET();
1510 }
1511
1512 void OPPROTO op_movl_eflags_T0_vm(void)
1513 {
1514 int eflags;
1515 eflags = T0;
1516 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1517 DF = 1 - (2 * ((eflags >> 10) & 1));
1518 /* we also update some system flags as in user mode */
1519 env->eflags = (env->eflags & ~(FL_UPDATE_MASK32 | VIF_MASK)) |
1520 (eflags & FL_UPDATE_MASK32);
1521 if (eflags & IF_MASK) {
1522 env->eflags |= VIF_MASK;
1523 if (env->eflags & VIP_MASK) {
1524 EIP = PARAM1;
1525 raise_exception(EXCP0D_GPF);
1526 }
1527 }
1528 FORCE_RET();
1529 }
1530 #endif
1531
1532 /* XXX: compute only O flag */
1533 void OPPROTO op_movb_eflags_T0(void)
1534 {
1535 int of;
1536 of = cc_table[CC_OP].compute_all() & CC_O;
1537 CC_SRC = (T0 & (CC_S | CC_Z | CC_A | CC_P | CC_C)) | of;
1538 }
1539
1540 void OPPROTO op_movl_T0_eflags(void)
1541 {
1542 int eflags;
1543 eflags = cc_table[CC_OP].compute_all();
1544 eflags |= (DF & DF_MASK);
1545 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
1546 T0 = eflags;
1547 }
1548
1549 /* vm86plus version */
1550 #if 0
1551 void OPPROTO op_movl_T0_eflags_vm(void)
1552 {
1553 int eflags;
1554 eflags = cc_table[CC_OP].compute_all();
1555 eflags |= (DF & DF_MASK);
1556 eflags |= env->eflags & ~(VM_MASK | RF_MASK | IF_MASK);
1557 if (env->eflags & VIF_MASK)
1558 eflags |= IF_MASK;
1559 T0 = eflags;
1560 }
1561 #endif
1562
1563 void OPPROTO op_cld(void)
1564 {
1565 DF = 1;
1566 }
1567
1568 void OPPROTO op_std(void)
1569 {
1570 DF = -1;
1571 }
1572
1573 void OPPROTO op_clc(void)
1574 {
1575 int eflags;
1576 eflags = cc_table[CC_OP].compute_all();
1577 eflags &= ~CC_C;
1578 CC_SRC = eflags;
1579 }
1580
1581 void OPPROTO op_stc(void)
1582 {
1583 int eflags;
1584 eflags = cc_table[CC_OP].compute_all();
1585 eflags |= CC_C;
1586 CC_SRC = eflags;
1587 }
1588
1589 void OPPROTO op_cmc(void)
1590 {
1591 int eflags;
1592 eflags = cc_table[CC_OP].compute_all();
1593 eflags ^= CC_C;
1594 CC_SRC = eflags;
1595 }
1596
1597 void OPPROTO op_salc(void)
1598 {
1599 int cf;
1600 cf = cc_table[CC_OP].compute_c();
1601 EAX = (EAX & ~0xff) | ((-cf) & 0xff);
1602 }
1603
1604 static int compute_all_eflags(void)
1605 {
1606 return CC_SRC;
1607 }
1608
1609 static int compute_c_eflags(void)
1610 {
1611 return CC_SRC & CC_C;
1612 }
1613
1614 static int compute_c_mul(void)
1615 {
1616 int cf;
1617 cf = (CC_SRC != 0);
1618 return cf;
1619 }
1620
1621 static int compute_all_mul(void)
1622 {
1623 int cf, pf, af, zf, sf, of;
1624 cf = (CC_SRC != 0);
1625 pf = 0; /* undefined */
1626 af = 0; /* undefined */
1627 zf = 0; /* undefined */
1628 sf = 0; /* undefined */
1629 of = cf << 11;
1630 return cf | pf | af | zf | sf | of;
1631 }
1632
1633 CCTable cc_table[CC_OP_NB] = {
1634 [CC_OP_DYNAMIC] = { /* should never happen */ },
1635
1636 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
1637
1638 [CC_OP_MUL] = { compute_all_mul, compute_c_mul },
1639
1640 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
1641 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
1642 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
1643
1644 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
1645 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
1646 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
1647
1648 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
1649 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
1650 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
1651
1652 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
1653 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
1654 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
1655
1656 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
1657 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
1658 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
1659
1660 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
1661 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
1662 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
1663
1664 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
1665 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
1666 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
1667
1668 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
1669 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
1670 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
1671
1672 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
1673 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
1674 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
1675 };
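/* the translator selects the entry through CC_OP; e.g. op_setz_T0_cc above
   reads ZF as (cc_table[CC_OP].compute_all() >> 6) & 1, and CC_OP_EFLAGS
   means the flags are already stored literally in CC_SRC. */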
1676
1677 /* floating point support. Some of the code for complicated x87
1678    functions comes from the LGPL'ed x86 emulator found in the Willows
1679    TWIN Windows emulator. */
1680
1681 #ifdef USE_X86LDOUBLE
1682 /* use long double functions */
1683 #define lrint lrintl
1684 #define llrint llrintl
1685 #define fabs fabsl
1686 #define sin sinl
1687 #define cos cosl
1688 #define sqrt sqrtl
1689 #define pow powl
1690 #define log logl
1691 #define tan tanl
1692 #define atan2 atan2l
1693 #define floor floorl
1694 #define ceil ceill
1695 #define rint rintl
1696 #endif
1697
1698 extern int lrint(CPU86_LDouble x);
1699 extern int64_t llrint(CPU86_LDouble x);
1700 extern CPU86_LDouble fabs(CPU86_LDouble x);
1701 extern CPU86_LDouble sin(CPU86_LDouble x);
1702 extern CPU86_LDouble cos(CPU86_LDouble x);
1703 extern CPU86_LDouble sqrt(CPU86_LDouble x);
1704 extern CPU86_LDouble pow(CPU86_LDouble, CPU86_LDouble);
1705 extern CPU86_LDouble log(CPU86_LDouble x);
1706 extern CPU86_LDouble tan(CPU86_LDouble x);
1707 extern CPU86_LDouble atan2(CPU86_LDouble, CPU86_LDouble);
1708 extern CPU86_LDouble floor(CPU86_LDouble x);
1709 extern CPU86_LDouble ceil(CPU86_LDouble x);
1710 extern CPU86_LDouble rint(CPU86_LDouble x);
1711
1712 #if defined(__powerpc__)
1713 extern CPU86_LDouble copysign(CPU86_LDouble, CPU86_LDouble);
1714
1715 /* correct (but slow) PowerPC rint() (glibc version is incorrect) */
1716 double qemu_rint(double x)
1717 {
1718 double y = 4503599627370496.0;
1719 if (fabs(x) >= y)
1720 return x;
1721 if (x < 0)
1722 y = -y;
1723 y = (x + y) - y;
1724 if (y == 0.0)
1725 y = copysign(y, x);
1726 return y;
1727 }
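/* the trick above: adding and then subtracting 2^52 (4503599627370496.0)
   forces rounding to an integer in the current rounding mode, because
   doubles of that magnitude have no fraction bits; copysign() preserves
   the sign of a -0.0 result. */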
1728
1729 #define rint qemu_rint
1730 #endif
1731
1732 #define RC_MASK 0xc00
1733 #define RC_NEAR 0x000
1734 #define RC_DOWN 0x400
1735 #define RC_UP 0x800
1736 #define RC_CHOP 0xc00
1737
1738 #define MAXTAN 9223372036854775808.0
1739
1740 #ifdef USE_X86LDOUBLE
1741
1742 /* only for x86 */
1743 typedef union {
1744 long double d;
1745 struct {
1746 unsigned long long lower;
1747 unsigned short upper;
1748 } l;
1749 } CPU86_LDoubleU;
1750
1751 /* the following deal with x86 long double-precision numbers */
1752 #define MAXEXPD 0x7fff
1753 #define EXPBIAS 16383
1754 #define EXPD(fp) (fp.l.upper & 0x7fff)
1755 #define SIGND(fp) ((fp.l.upper) & 0x8000)
1756 #define MANTD(fp) (fp.l.lower)
1757 #define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS
1758
1759 #else
1760
1761 typedef union {
1762 double d;
1763 #ifndef WORDS_BIGENDIAN
1764 struct {
1765 uint32_t lower;
1766 int32_t upper;
1767 } l;
1768 #else
1769 struct {
1770 int32_t upper;
1771 uint32_t lower;
1772 } l;
1773 #endif
1774 int64_t ll;
1775 } CPU86_LDoubleU;
1776
1777 /* the following deal with IEEE double-precision numbers */
1778 #define MAXEXPD 0x7ff
1779 #define EXPBIAS 1023
1780 #define EXPD(fp) (((fp.l.upper) >> 20) & 0x7FF)
1781 #define SIGND(fp) ((fp.l.upper) & 0x80000000)
1782 #define MANTD(fp) (fp.ll & ((1LL << 52) - 1))
1783 #define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7ff << 20)) | (EXPBIAS << 20)
1784 #endif
1785
1786 /* fp load FT0 */
1787
1788 void OPPROTO op_flds_FT0_A0(void)
1789 {
1790 #ifdef USE_FP_CONVERT
1791 FP_CONVERT.i32 = ldl((void *)A0);
1792 FT0 = FP_CONVERT.f;
1793 #else
1794 FT0 = ldfl((void *)A0);
1795 #endif
1796 }
1797
1798 void OPPROTO op_fldl_FT0_A0(void)
1799 {
1800 #ifdef USE_FP_CONVERT
1801 FP_CONVERT.i64 = ldq((void *)A0);
1802 FT0 = FP_CONVERT.d;
1803 #else
1804 FT0 = ldfq((void *)A0);
1805 #endif
1806 }
1807
1808 /* helpers are needed to avoid static constant reference. XXX: find a better way */
1809 #ifdef USE_INT_TO_FLOAT_HELPERS
1810
1811 void helper_fild_FT0_A0(void)
1812 {
1813 FT0 = (CPU86_LDouble)ldsw((void *)A0);
1814 }
1815
1816 void helper_fildl_FT0_A0(void)
1817 {
1818 FT0 = (CPU86_LDouble)((int32_t)ldl((void *)A0));
1819 }
1820
1821 void helper_fildll_FT0_A0(void)
1822 {
1823 FT0 = (CPU86_LDouble)((int64_t)ldq((void *)A0));
1824 }
1825
1826 void OPPROTO op_fild_FT0_A0(void)
1827 {
1828 helper_fild_FT0_A0();
1829 }
1830
1831 void OPPROTO op_fildl_FT0_A0(void)
1832 {
1833 helper_fildl_FT0_A0();
1834 }
1835
1836 void OPPROTO op_fildll_FT0_A0(void)
1837 {
1838 helper_fildll_FT0_A0();
1839 }
1840
1841 #else
1842
1843 void OPPROTO op_fild_FT0_A0(void)
1844 {
1845 #ifdef USE_FP_CONVERT
1846 FP_CONVERT.i32 = ldsw((void *)A0);
1847 FT0 = (CPU86_LDouble)FP_CONVERT.i32;
1848 #else
1849 FT0 = (CPU86_LDouble)ldsw((void *)A0);
1850 #endif
1851 }
1852
1853 void OPPROTO op_fildl_FT0_A0(void)
1854 {
1855 #ifdef USE_FP_CONVERT
1856 FP_CONVERT.i32 = (int32_t) ldl((void *)A0);
1857 FT0 = (CPU86_LDouble)FP_CONVERT.i32;
1858 #else
1859 FT0 = (CPU86_LDouble)((int32_t)ldl((void *)A0));
1860 #endif
1861 }
1862
1863 void OPPROTO op_fildll_FT0_A0(void)
1864 {
1865 #ifdef USE_FP_CONVERT
1866 FP_CONVERT.i64 = (int64_t) ldq((void *)A0);
1867 FT0 = (CPU86_LDouble)FP_CONVERT.i64;
1868 #else
1869 FT0 = (CPU86_LDouble)((int64_t)ldq((void *)A0));
1870 #endif
1871 }
1872 #endif
1873
1874 /* fp load ST0 */
1875
1876 void OPPROTO op_flds_ST0_A0(void)
1877 {
1878 #ifdef USE_FP_CONVERT
1879 FP_CONVERT.i32 = ldl((void *)A0);
1880 ST0 = FP_CONVERT.f;
1881 #else
1882 ST0 = ldfl((void *)A0);
1883 #endif
1884 }
1885
1886 void OPPROTO op_fldl_ST0_A0(void)
1887 {
1888 #ifdef USE_FP_CONVERT
1889 FP_CONVERT.i64 = ldq((void *)A0);
1890 ST0 = FP_CONVERT.d;
1891 #else
1892 ST0 = ldfq((void *)A0);
1893 #endif
1894 }
1895
1896 #ifdef USE_X86LDOUBLE
1897 void OPPROTO op_fldt_ST0_A0(void)
1898 {
1899 ST0 = *(long double *)A0;
1900 }
1901 #else
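/* without native long doubles, the 80 bit memory format (64 bit mantissa
   with an explicit integer bit, 15 bit exponent biased by 16383) is
   converted to an IEEE double: the top 52 fraction bits are kept
   (ldq() >> 11, dropping the explicit integer bit) and the exponent is
   rebiased to 1023; helper_fstt below performs the reverse conversion. */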
1902 static inline CPU86_LDouble helper_fldt(uint8_t *ptr)
1903 {
1904 CPU86_LDoubleU temp;
1905 int upper, e;
1906 /* mantissa */
1907 upper = lduw(ptr + 8);
1908 /* XXX: handle overflow ? */
1909 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
1910 e |= (upper >> 4) & 0x800; /* sign */
1911 temp.ll = ((ldq(ptr) >> 11) & ((1LL << 52) - 1)) | ((uint64_t)e << 52);
1912 return temp.d;
1913 }
1914
1915 void helper_fldt_ST0_A0(void)
1916 {
1917 ST0 = helper_fldt((uint8_t *)A0);
1918 }
1919
1920 void OPPROTO op_fldt_ST0_A0(void)
1921 {
1922 helper_fldt_ST0_A0();
1923 }
1924 #endif
1925
1926 /* helpers are needed to avoid static constant reference. XXX: find a better way */
1927 #ifdef USE_INT_TO_FLOAT_HELPERS
1928
1929 void helper_fild_ST0_A0(void)
1930 {
1931 ST0 = (CPU86_LDouble)ldsw((void *)A0);
1932 }
1933
1934 void helper_fildl_ST0_A0(void)
1935 {
1936 ST0 = (CPU86_LDouble)((int32_t)ldl((void *)A0));
1937 }
1938
1939 void helper_fildll_ST0_A0(void)
1940 {
1941 ST0 = (CPU86_LDouble)((int64_t)ldq((void *)A0));
1942 }
1943
1944 void OPPROTO op_fild_ST0_A0(void)
1945 {
1946 helper_fild_ST0_A0();
1947 }
1948
1949 void OPPROTO op_fildl_ST0_A0(void)
1950 {
1951 helper_fildl_ST0_A0();
1952 }
1953
1954 void OPPROTO op_fildll_ST0_A0(void)
1955 {
1956 helper_fildll_ST0_A0();
1957 }
1958
1959 #else
1960
1961 void OPPROTO op_fild_ST0_A0(void)
1962 {
1963 #ifdef USE_FP_CONVERT
1964 FP_CONVERT.i32 = ldsw((void *)A0);
1965 ST0 = (CPU86_LDouble)FP_CONVERT.i32;
1966 #else
1967 ST0 = (CPU86_LDouble)ldsw((void *)A0);
1968 #endif
1969 }
1970
1971 void OPPROTO op_fildl_ST0_A0(void)
1972 {
1973 #ifdef USE_FP_CONVERT
1974 FP_CONVERT.i32 = (int32_t) ldl((void *)A0);
1975 ST0 = (CPU86_LDouble)FP_CONVERT.i32;
1976 #else
1977 ST0 = (CPU86_LDouble)((int32_t)ldl((void *)A0));
1978 #endif
1979 }
1980
1981 void OPPROTO op_fildll_ST0_A0(void)
1982 {
1983 #ifdef USE_FP_CONVERT
1984 FP_CONVERT.i64 = (int64_t) ldq((void *)A0);
1985 ST0 = (CPU86_LDouble)FP_CONVERT.i64;
1986 #else
1987 ST0 = (CPU86_LDouble)((int64_t)ldq((void *)A0));
1988 #endif
1989 }
1990
1991 #endif
1992
1993 /* fp store */
1994
1995 void OPPROTO op_fsts_ST0_A0(void)
1996 {
1997 #ifdef USE_FP_CONVERT
1998 FP_CONVERT.d = ST0;
1999 stfl((void *)A0, FP_CONVERT.f);
2000 #else
2001 stfl((void *)A0, (float)ST0);
2002 #endif
2003 }
2004
2005 void OPPROTO op_fstl_ST0_A0(void)
2006 {
2007 stfq((void *)A0, (double)ST0);
2008 }
2009
2010 #ifdef USE_X86LDOUBLE
2011 void OPPROTO op_fstt_ST0_A0(void)
2012 {
2013 *(long double *)A0 = ST0;
2014 }
2015 #else
2016
2017 static inline void helper_fstt(CPU86_LDouble f, uint8_t *ptr)
2018 {
2019 CPU86_LDoubleU temp;
2020 int e;
2021 temp.d = f;
2022 /* mantissa */
2023 stq(ptr, (MANTD(temp) << 11) | (1LL << 63));
2024 /* exponent + sign */
2025 e = EXPD(temp) - EXPBIAS + 16383;
2026 e |= SIGND(temp) >> 16;
2027 stw(ptr + 8, e);
2028 }
2029
2030 void helper_fstt_ST0_A0(void)
2031 {
2032 helper_fstt(ST0, (uint8_t *)A0);
2033 }
2034
2035 void OPPROTO op_fstt_ST0_A0(void)
2036 {
2037 helper_fstt_ST0_A0();
2038 }
2039 #endif
2040
2041 void OPPROTO op_fist_ST0_A0(void)
2042 {
2043 #if defined(__sparc__) && !defined(__sparc_v9__)
2044 register CPU86_LDouble d asm("o0");
2045 #else
2046 CPU86_LDouble d;
2047 #endif
2048 int val;
2049
2050 d = ST0;
2051 val = lrint(d);
2052 stw((void *)A0, val);
2053 }
2054
2055 void OPPROTO op_fistl_ST0_A0(void)
2056 {
2057 #if defined(__sparc__) && !defined(__sparc_v9__)
2058 register CPU86_LDouble d asm("o0");
2059 #else
2060 CPU86_LDouble d;
2061 #endif
2062 int val;
2063
2064 d = ST0;
2065 val = lrint(d);
2066 stl((void *)A0, val);
2067 }
2068
2069 void OPPROTO op_fistll_ST0_A0(void)
2070 {
2071 #if defined(__sparc__) && !defined(__sparc_v9__)
2072 register CPU86_LDouble d asm("o0");
2073 #else
2074 CPU86_LDouble d;
2075 #endif
2076 int64_t val;
2077
2078 d = ST0;
2079 val = llrint(d);
2080 stq((void *)A0, val);
2081 }
2082
2083 /* BCD ops */
2084
2085 #define MUL10(iv) ( iv + iv + (iv << 3) )
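/* MUL10(iv) computes iv * 10 as 2*iv + 8*iv, using only additions and a
   shift. */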
2086
2087 void helper_fbld_ST0_A0(void)
2088 {
2089 uint8_t *seg;
2090 CPU86_LDouble fpsrcop;
2091 int m32i;
2092 unsigned int v;
2093
2094 /* in this code, seg/m32i will be used as temporary ptr/int */
2095 seg = (uint8_t *)A0 + 8;
2096 v = ldub(seg--);
2097 /* XXX: raise exception */
2098 if (v != 0)
2099 return;
2100 v = ldub(seg--);
2101 /* XXX: raise exception */
2102 if ((v & 0xf0) != 0)
2103 return;
2104 m32i = v; /* <-- d14 */
2105 v = ldub(seg--);
2106 m32i = MUL10(m32i) + (v >> 4); /* <-- val * 10 + d13 */
2107 m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d12 */
2108 v = ldub(seg--);
2109 m32i = MUL10(m32i) + (v >> 4); /* <-- val * 10 + d11 */
2110 m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d10 */
2111 v = ldub(seg--);
2112 m32i = MUL10(m32i) + (v >> 4); /* <-- val * 10 + d9 */
2113 m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d8 */
2114 fpsrcop = ((CPU86_LDouble)m32i) * 100000000.0;
2115
2116 v = ldub(seg--);
2117 m32i = (v >> 4); /* <-- d7 */
2118 m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d6 */
2119 v = ldub(seg--);
2120 m32i = MUL10(m32i) + (v >> 4); /* <-- val * 10 + d5 */
2121 m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d4 */
2122 v = ldub(seg--);
2123 m32i = MUL10(m32i) + (v >> 4); /* <-- val * 10 + d3 */
2124 m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d2 */
2125 v = ldub(seg);
2126 m32i = MUL10(m32i) + (v >> 4); /* <-- val * 10 + d1 */
2127 m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d0 */
2128 fpsrcop += ((CPU86_LDouble)m32i);
2129 if ( ldub(seg+9) & 0x80 )
2130 fpsrcop = -fpsrcop;
2131 ST0 = fpsrcop;
2132 }
2133
2134 void OPPROTO op_fbld_ST0_A0(void)
2135 {
2136 helper_fbld_ST0_A0();
2137 }
2138
2139 void helper_fbst_ST0_A0(void)
2140 {
2141 CPU86_LDouble fptemp;
2142 CPU86_LDouble fpsrcop;
2143 int v;
2144 uint8_t *mem_ref, *mem_end;
2145
2146 fpsrcop = rint(ST0);
2147 mem_ref = (uint8_t *)A0;
2148 mem_end = mem_ref + 8;
2149 if ( fpsrcop < 0.0 ) {
2150 stw(mem_end, 0x8000);
2151 fpsrcop = -fpsrcop;
2152 } else {
2153 stw(mem_end, 0x0000);
2154 }
2155 while (mem_ref < mem_end) {
2156 if (fpsrcop == 0.0)
2157 break;
2158 fptemp = floor(fpsrcop/10.0);
2159 v = ((int)(fpsrcop - fptemp*10.0));
2160 if (fptemp == 0.0) {
2161 stb(mem_ref++, v);
2162 break;
2163 }
2164 fpsrcop = fptemp;
2165 fptemp = floor(fpsrcop/10.0);
2166 v |= (((int)(fpsrcop - fptemp*10.0)) << 4);
2167 stb(mem_ref++, v);
2168 fpsrcop = fptemp;
2169 }
2170 while (mem_ref < mem_end) {
2171 stb(mem_ref++, 0);
2172 }
2173 }
2174
2175 void OPPROTO op_fbst_ST0_A0(void)
2176 {
2177 helper_fbst_ST0_A0();
2178 }
2179
2180 /* FPU move */
2181
2182 static inline void fpush(void)
2183 {
2184 env->fpstt = (env->fpstt - 1) & 7;
2185 env->fptags[env->fpstt] = 0; /* validate stack entry */
2186 }
2187
2188 static inline void fpop(void)
2189 {
2190     env->fptags[env->fpstt] = 1; /* invalidate stack entry */
2191 env->fpstt = (env->fpstt + 1) & 7;
2192 }
2193
2194 void OPPROTO op_fpush(void)
2195 {
2196 fpush();
2197 }
2198
2199 void OPPROTO op_fpop(void)
2200 {
2201 fpop();
2202 }
2203
2204 void OPPROTO op_fdecstp(void)
2205 {
2206 env->fpstt = (env->fpstt - 1) & 7;
2207 env->fpus &= (~0x4700);
2208 }
2209
2210 void OPPROTO op_fincstp(void)
2211 {
2212 env->fpstt = (env->fpstt + 1) & 7;
2213 env->fpus &= (~0x4700);
2214 }
2215
2216 void OPPROTO op_fmov_ST0_FT0(void)
2217 {
2218 ST0 = FT0;
2219 }
2220
2221 void OPPROTO op_fmov_FT0_STN(void)
2222 {
2223 FT0 = ST(PARAM1);
2224 }
2225
2226 void OPPROTO op_fmov_ST0_STN(void)
2227 {
2228 ST0 = ST(PARAM1);
2229 }
2230
2231 void OPPROTO op_fmov_STN_ST0(void)
2232 {
2233 ST(PARAM1) = ST0;
2234 }
2235
2236 void OPPROTO op_fxchg_ST0_STN(void)
2237 {
2238 CPU86_LDouble tmp;
2239 tmp = ST(PARAM1);
2240 ST(PARAM1) = ST0;
2241 ST0 = tmp;
2242 }
2243
2244 /* FPU operations */
2245
2246 /* XXX: handle nans */
2247 void OPPROTO op_fcom_ST0_FT0(void)
2248 {
2249 env->fpus &= (~0x4500); /* (C3,C2,C0) <-- 000 */
2250 if (ST0 < FT0)
2251 env->fpus |= 0x100; /* (C3,C2,C0) <-- 001 */
2252 else if (ST0 == FT0)
2253 env->fpus |= 0x4000; /* (C3,C2,C0) <-- 100 */
2254 FORCE_RET();
2255 }
2256
2257 /* XXX: handle nans */
2258 void OPPROTO op_fucom_ST0_FT0(void)
2259 {
2260 env->fpus &= (~0x4500); /* (C3,C2,C0) <-- 000 */
2261 if (ST0 < FT0)
2262 env->fpus |= 0x100; /* (C3,C2,C0) <-- 001 */
2263 else if (ST0 == FT0)
2264 env->fpus |= 0x4000; /* (C3,C2,C0) <-- 100 */
2265 FORCE_RET();
2266 }
2267
2268 /* XXX: handle nans */
2269 void OPPROTO op_fcomi_ST0_FT0(void)
2270 {
2271 int eflags;
2272 eflags = cc_table[CC_OP].compute_all();
2273 eflags &= ~(CC_Z | CC_P | CC_C);
2274 if (ST0 < FT0)
2275 eflags |= CC_C;
2276 else if (ST0 == FT0)
2277 eflags |= CC_Z;
2278 CC_SRC = eflags;
2279 FORCE_RET();
2280 }
2281
2282 /* XXX: handle nans */
2283 void OPPROTO op_fucomi_ST0_FT0(void)
2284 {
2285 int eflags;
2286 eflags = cc_table[CC_OP].compute_all();
2287 eflags &= ~(CC_Z | CC_P | CC_C);
2288 if (ST0 < FT0)
2289 eflags |= CC_C;
2290 else if (ST0 == FT0)
2291 eflags |= CC_Z;
2292 CC_SRC = eflags;
2293 FORCE_RET();
2294 }
2295
2296 void OPPROTO op_fadd_ST0_FT0(void)
2297 {
2298 ST0 += FT0;
2299 }
2300
2301 void OPPROTO op_fmul_ST0_FT0(void)
2302 {
2303 ST0 *= FT0;
2304 }
2305
2306 void OPPROTO op_fsub_ST0_FT0(void)
2307 {
2308 ST0 -= FT0;
2309 }
2310
2311 void OPPROTO op_fsubr_ST0_FT0(void)
2312 {
2313 ST0 = FT0 - ST0;
2314 }
2315
2316 void OPPROTO op_fdiv_ST0_FT0(void)
2317 {
2318 ST0 /= FT0;
2319 }
2320
2321 void OPPROTO op_fdivr_ST0_FT0(void)
2322 {
2323 ST0 = FT0 / ST0;
2324 }
2325
2326 /* fp operations between STN and ST0 */
2327
2328 void OPPROTO op_fadd_STN_ST0(void)
2329 {
2330 ST(PARAM1) += ST0;
2331 }
2332
2333 void OPPROTO op_fmul_STN_ST0(void)
2334 {
2335 ST(PARAM1) *= ST0;
2336 }
2337
2338 void OPPROTO op_fsub_STN_ST0(void)
2339 {
2340 ST(PARAM1) -= ST0;
2341 }
2342
2343 void OPPROTO op_fsubr_STN_ST0(void)
2344 {
2345 CPU86_LDouble *p;
2346 p = &ST(PARAM1);
2347 *p = ST0 - *p;
2348 }
2349
2350 void OPPROTO op_fdiv_STN_ST0(void)
2351 {
2352 ST(PARAM1) /= ST0;
2353 }
2354
2355 void OPPROTO op_fdivr_STN_ST0(void)
2356 {
2357 CPU86_LDouble *p;
2358 p = &ST(PARAM1);
2359 *p = ST0 / *p;
2360 }
2361
2362 /* misc FPU operations */
2363 void OPPROTO op_fchs_ST0(void)
2364 {
2365 ST0 = -ST0;
2366 }
2367
2368 void OPPROTO op_fabs_ST0(void)
2369 {
2370 ST0 = fabs(ST0);
2371 }
2372
2373 void helper_fxam_ST0(void)
2374 {
2375 CPU86_LDoubleU temp;
2376 int expdif;
2377
2378 temp.d = ST0;
2379
2380 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
2381 if (SIGND(temp))
2382 env->fpus |= 0x200; /* C1 <-- 1 */
2383
2384 expdif = EXPD(temp);
2385 if (expdif == MAXEXPD) {
2386 if (MANTD(temp) == 0)
2387 env->fpus |= 0x500 /*Infinity*/;
2388 else
2389 env->fpus |= 0x100 /*NaN*/;
2390 } else if (expdif == 0) {
2391 if (MANTD(temp) == 0)
2392 env->fpus |= 0x4000 /*Zero*/;
2393 else
2394 env->fpus |= 0x4400 /*Denormal*/;
2395 } else {
2396 env->fpus |= 0x400;
2397 }
2398 }
2399
2400 void OPPROTO op_fxam_ST0(void)
2401 {
2402 helper_fxam_ST0();
2403 }
2404
2405 void OPPROTO op_fld1_ST0(void)
2406 {
2407 ST0 = *(CPU86_LDouble *)&f15rk[1];
2408 }
2409
2410 void OPPROTO op_fldl2t_ST0(void)
2411 {
2412 ST0 = *(CPU86_LDouble *)&f15rk[6];
2413 }
2414
2415 void OPPROTO op_fldl2e_ST0(void)
2416 {
2417 ST0 = *(CPU86_LDouble *)&f15rk[5];
2418 }
2419
2420 void OPPROTO op_fldpi_ST0(void)
2421 {
2422 ST0 = *(CPU86_LDouble *)&f15rk[2];
2423 }
2424
2425 void OPPROTO op_fldlg2_ST0(void)
2426 {
2427 ST0 = *(CPU86_LDouble *)&f15rk[3];
2428 }
2429
2430 void OPPROTO op_fldln2_ST0(void)
2431 {
2432 ST0 = *(CPU86_LDouble *)&f15rk[4];
2433 }
2434
2435 void OPPROTO op_fldz_ST0(void)
2436 {
2437 ST0 = *(CPU86_LDouble *)&f15rk[0];
2438 }
2439
2440 void OPPROTO op_fldz_FT0(void)
2441 {
2442     FT0 = *(CPU86_LDouble *)&f15rk[0];
2443 }
2444
2445 void helper_f2xm1(void)
2446 {
2447 ST0 = pow(2.0,ST0) - 1.0;
2448 }
2449
2450 void helper_fyl2x(void)
2451 {
2452 CPU86_LDouble fptemp;
2453
2454 fptemp = ST0;
2455 if (fptemp>0.0){
2456 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
2457 ST1 *= fptemp;
2458 fpop();
2459 } else {
2460 env->fpus &= (~0x4700);
2461 env->fpus |= 0x400;
2462 }
2463 }
2464
2465 void helper_fptan(void)
2466 {
2467 CPU86_LDouble fptemp;
2468
2469 fptemp = ST0;
2470 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
2471 env->fpus |= 0x400;
2472 } else {
2473 ST0 = tan(fptemp);
2474 fpush();
2475 ST0 = 1.0;
2476 env->fpus &= (~0x400); /* C2 <-- 0 */
2477 /* the above code is for |arg| < 2**52 only */
2478 }
2479 }
2480
2481 void helper_fpatan(void)
2482 {
2483 CPU86_LDouble fptemp, fpsrcop;
2484
2485 fpsrcop = ST1;
2486 fptemp = ST0;
2487 ST1 = atan2(fpsrcop,fptemp);
2488 fpop();
2489 }
2490
2491 void helper_fxtract(void)
2492 {
2493 CPU86_LDoubleU temp;
2494 unsigned int expdif;
2495
2496 temp.d = ST0;
2497 expdif = EXPD(temp) - EXPBIAS;
2498 /*DP exponent bias*/
2499 ST0 = expdif;
2500 fpush();
2501 BIASEXPONENT(temp);
2502 ST0 = temp.d;
2503 }
2504
2505 void helper_fprem1(void)
2506 {
2507 CPU86_LDouble dblq, fpsrcop, fptemp;
2508 CPU86_LDoubleU fpsrcop1, fptemp1;
2509 int expdif;
2510 int q;
2511
2512 fpsrcop = ST0;
2513 fptemp = ST1;
2514 fpsrcop1.d = fpsrcop;
2515 fptemp1.d = fptemp;
2516 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
2517 if (expdif < 53) {
2518 dblq = fpsrcop / fptemp;
2519 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
2520 ST0 = fpsrcop - fptemp*dblq;
2521 q = (int)dblq; /* cutting off top bits is assumed here */
2522 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
2523 /* (C0,C1,C3) <-- (q2,q1,q0) */
2524 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
2525 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
2526 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
2527 } else {
2528 env->fpus |= 0x400; /* C2 <-- 1 */
2529 fptemp = pow(2.0, expdif-50);
2530 fpsrcop = (ST0 / ST1) / fptemp;
2531 /* fpsrcop = integer obtained by rounding to the nearest */
2532 fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
2533 floor(fpsrcop): ceil(fpsrcop);
2534 ST0 -= (ST1 * fpsrcop * fptemp);
2535 }
2536 }
2537
2538 void helper_fprem(void)
2539 {
2540 CPU86_LDouble dblq, fpsrcop, fptemp;
2541 CPU86_LDoubleU fpsrcop1, fptemp1;
2542 int expdif;
2543 int q;
2544
2545 fpsrcop = ST0;
2546 fptemp = ST1;
2547 fpsrcop1.d = fpsrcop;
2548 fptemp1.d = fptemp;
2549 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
2550 if ( expdif < 53 ) {
2551 dblq = fpsrcop / fptemp;
2552 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
2553 ST0 = fpsrcop - fptemp*dblq;
2554 q = (int)dblq; /* cutting off top bits is assumed here */
2555 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
2556 /* (C0,C1,C3) <-- (q2,q1,q0) */
2557 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
2558 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
2559 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
2560 } else {
2561 env->fpus |= 0x400; /* C2 <-- 1 */
2562 fptemp = pow(2.0, expdif-50);
2563 fpsrcop = (ST0 / ST1) / fptemp;
2564 /* fpsrcop = integer obtained by chopping */
2565 fpsrcop = (fpsrcop < 0.0)?
2566 -(floor(fabs(fpsrcop))): floor(fpsrcop);
2567 ST0 -= (ST1 * fpsrcop * fptemp);
2568 }
2569 }
2570
2571 void helper_fyl2xp1(void)
2572 {
2573 CPU86_LDouble fptemp;
2574
2575 fptemp = ST0;
2576 if ((fptemp+1.0)>0.0) {
2577 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
2578 ST1 *= fptemp;
2579 fpop();
2580 } else {
2581 env->fpus &= (~0x4700);
2582 env->fpus |= 0x400;
2583 }
2584 }
2585
2586 void helper_fsqrt(void)
2587 {
2588 CPU86_LDouble fptemp;
2589
2590 fptemp = ST0;
2591 if (fptemp<0.0) {
2592 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
2593 env->fpus |= 0x400;
2594 }
2595 ST0 = sqrt(fptemp);
2596 }
2597
2598 void helper_fsincos(void)
2599 {
2600 CPU86_LDouble fptemp;
2601
2602 fptemp = ST0;
2603 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
2604 env->fpus |= 0x400;
2605 } else {
2606 ST0 = sin(fptemp);
2607 fpush();
2608 ST0 = cos(fptemp);
2609 env->fpus &= (~0x400); /* C2 <-- 0 */
2610 /* the above code is for |arg| < 2**63 only */
2611 }
2612 }
2613
2614 void helper_frndint(void)
2615 {
2616 ST0 = rint(ST0);
2617 }
2618
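/* FSCALE: ST0 <- ST0 * 2^ST1. Note that the architectural instruction uses
   ST1 truncated toward zero; this helper passes ST1 to pow() directly, so
   non-integral scale factors are not chopped. */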
2619 void helper_fscale(void)
2620 {
2621 CPU86_LDouble fpsrcop, fptemp;
2622
2623 fpsrcop = 2.0;
2624 fptemp = pow(fpsrcop,ST1);
2625 ST0 *= fptemp;
2626 }
2627
2628 void helper_fsin(void)
2629 {
2630 CPU86_LDouble fptemp;
2631
2632 fptemp = ST0;
2633 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
2634 env->fpus |= 0x400;
2635 } else {
2636 ST0 = sin(fptemp);
2637 env->fpus &= (~0x400); /* C2 <-- 0 */
2638 /* the above code is for |arg| < 2**53 only */
2639 }
2640 }
2641
2642 void helper_fcos(void)
2643 {
2644 CPU86_LDouble fptemp;
2645
2646 fptemp = ST0;
2647 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
2648 env->fpus |= 0x400;
2649 } else {
2650 ST0 = cos(fptemp);
2651 env->fpus &= (~0x400); /* C2 <-- 0 */
2652 /* the above code is for |arg| < 2**63 only */
2653 }
2654 }
2655
2656 /* associated helpers to reduce generated code length and to simplify
2657 relocation (FP constants are usually stored in .rodata section) */
2658
2659 void OPPROTO op_f2xm1(void)
2660 {
2661 helper_f2xm1();
2662 }
2663
2664 void OPPROTO op_fyl2x(void)
2665 {
2666 helper_fyl2x();
2667 }
2668
2669 void OPPROTO op_fptan(void)
2670 {
2671 helper_fptan();
2672 }
2673
2674 void OPPROTO op_fpatan(void)
2675 {
2676 helper_fpatan();
2677 }
2678
2679 void OPPROTO op_fxtract(void)
2680 {
2681 helper_fxtract();
2682 }
2683
2684 void OPPROTO op_fprem1(void)
2685 {
2686 helper_fprem1();
2687 }
2688
2689
2690 void OPPROTO op_fprem(void)
2691 {
2692 helper_fprem();
2693 }
2694
2695 void OPPROTO op_fyl2xp1(void)
2696 {
2697 helper_fyl2xp1();
2698 }
2699
2700 void OPPROTO op_fsqrt(void)
2701 {
2702 helper_fsqrt();
2703 }
2704
2705 void OPPROTO op_fsincos(void)
2706 {
2707 helper_fsincos();
2708 }
2709
2710 void OPPROTO op_frndint(void)
2711 {
2712 helper_frndint();
2713 }
2714
2715 void OPPROTO op_fscale(void)
2716 {
2717 helper_fscale();
2718 }
2719
2720 void OPPROTO op_fsin(void)
2721 {
2722 helper_fsin();
2723 }
2724
2725 void OPPROTO op_fcos(void)
2726 {
2727 helper_fcos();
2728 }
2729
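/* FNSTSW: the architectural status word is rebuilt by merging the
   separately kept top-of-stack pointer (env->fpstt) into bits 11-13
   of env->fpus before it is stored. */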
2730 void OPPROTO op_fnstsw_A0(void)
2731 {
2732 int fpus;
2733 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
2734 stw((void *)A0, fpus);
2735 }
2736
2737 void OPPROTO op_fnstsw_EAX(void)
2738 {
2739 int fpus;
2740 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
2741 EAX = (EAX & 0xffff0000) | fpus;
2742 }
2743
2744 void OPPROTO op_fnstcw_A0(void)
2745 {
2746 stw((void *)A0, env->fpuc);
2747 }
2748
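/* FLDCW: reload the control word and propagate its RC (rounding control)
   field to the host FPU through the C99 fenv interface, so that host
   arithmetic follows the guest rounding mode. */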
2749 void OPPROTO op_fldcw_A0(void)
2750 {
2751 int rnd_type;
2752 env->fpuc = lduw((void *)A0);
2753 /* set rounding mode */
2754 switch(env->fpuc & RC_MASK) {
2755 default:
2756 case RC_NEAR:
2757 rnd_type = FE_TONEAREST;
2758 break;
2759 case RC_DOWN:
2760 rnd_type = FE_DOWNWARD;
2761 break;
2762 case RC_UP:
2763 rnd_type = FE_UPWARD;
2764 break;
2765 case RC_CHOP:
2766 rnd_type = FE_TOWARDZERO;
2767 break;
2768 }
2769 fesetround(rnd_type);
2770 }
2771
2772 void OPPROTO op_fclex(void)
2773 {
2774 env->fpus &= 0x7f00;
2775 }
2776
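/* FNINIT: control word 0x037f (all exceptions masked, extended precision,
   round to nearest), status word cleared, and every register tagged empty
   (fptags[i] == 1 means empty in this implementation). */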
2777 void OPPROTO op_fninit(void)
2778 {
2779 env->fpus = 0;
2780 env->fpstt = 0;
2781 env->fpuc = 0x37f;
2782 env->fptags[0] = 1;
2783 env->fptags[1] = 1;
2784 env->fptags[2] = 1;
2785 env->fptags[3] = 1;
2786 env->fptags[4] = 1;
2787 env->fptags[5] = 1;
2788 env->fptags[6] = 1;
2789 env->fptags[7] = 1;
2790 }
2791
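/* FSTENV/FNSTENV: store the FPU environment image, 28 bytes for a 32-bit
   operand size and 14 bytes for 16-bit. The tag word is rebuilt from
   env->fptags[] and the register contents using the usual 2-bit encoding:
   00 = valid, 01 = zero, 10 = special (NaN, infinity, denormal), 11 = empty.
   The instruction/operand pointer fields are stored as zero. */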
2792 void helper_fstenv(uint8_t *ptr, int data32)
2793 {
2794 int fpus, fptag, exp, i;
2795 uint64_t mant;
2796 CPU86_LDoubleU tmp;
2797
2798 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
2799 fptag = 0;
2800 for (i=7; i>=0; i--) {
2801 fptag <<= 2;
2802 if (env->fptags[i]) {
2803 fptag |= 3;
2804 } else {
2805 tmp.d = env->fpregs[i];
2806 exp = EXPD(tmp);
2807 mant = MANTD(tmp);
2808 if (exp == 0 && mant == 0) {
2809 /* zero */
2810 fptag |= 1;
2811 } else if (exp == 0 || exp == MAXEXPD
2812 #ifdef USE_X86LDOUBLE
2813 || (mant & (1LL << 63)) == 0
2814 #endif
2815 ) {
2816 /* NaNs, infinity, denormal */
2817 fptag |= 2;
2818 }
2819 }
2820 }
2821 if (data32) {
2822 /* 32 bit */
2823 stl(ptr, env->fpuc);
2824 stl(ptr + 4, fpus);
2825 stl(ptr + 8, fptag);
2826 stl(ptr + 12, 0);
2827 stl(ptr + 16, 0);
2828 stl(ptr + 20, 0);
2829 stl(ptr + 24, 0);
2830 } else {
2831 /* 16 bit */
2832 stw(ptr, env->fpuc);
2833 stw(ptr + 2, fpus);
2834 stw(ptr + 4, fptag);
2835 stw(ptr + 6, 0);
2836 stw(ptr + 8, 0);
2837 stw(ptr + 10, 0);
2838 stw(ptr + 12, 0);
2839 }
2840 }
2841
2842 void helper_fldenv(uint8_t *ptr, int data32)
2843 {
2844 int i, fpus, fptag;
2845
2846 if (data32) {
2847 env->fpuc = lduw(ptr);
2848 fpus = lduw(ptr + 4);
2849 fptag = lduw(ptr + 8);
2850 }
2851 else {
2852 env->fpuc = lduw(ptr);
2853 fpus = lduw(ptr + 2);
2854 fptag = lduw(ptr + 4);
2855 }
2856 env->fpstt = (fpus >> 11) & 7;
2857 env->fpus = fpus & ~0x3800;
2858 for(i = 0;i < 8; i++) { /* restore all eight tags */
2859 env->fptags[i] = ((fptag & 3) == 3);
2860 fptag >>= 2;
2861 }
2862 }
2863
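/* FSAVE: store the environment image (14 or 28 bytes, see helper_fstenv)
   followed by the eight stack registers as 10-byte extended reals, then
   reset the FPU exactly as FNINIT does. helper_frstor below reverses the
   same layout. */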
2864 void helper_fsave(uint8_t *ptr, int data32)
2865 {
2866 CPU86_LDouble tmp;
2867 int i;
2868
2869 helper_fstenv(ptr, data32);
2870
2871 ptr += (14 << data32);
2872 for(i = 0;i < 8; i++) {
2873 tmp = ST(i);
2874 #ifdef USE_X86LDOUBLE
2875 *(long double *)ptr = tmp;
2876 #else
2877 helper_fstt(tmp, ptr);
2878 #endif
2879 ptr += 10;
2880 }
2881
2882 /* fninit */
2883 env->fpus = 0;
2884 env->fpstt = 0;
2885 env->fpuc = 0x37f;
2886 env->fptags[0] = 1;
2887 env->fptags[1] = 1;
2888 env->fptags[2] = 1;
2889 env->fptags[3] = 1;
2890 env->fptags[4] = 1;
2891 env->fptags[5] = 1;
2892 env->fptags[6] = 1;
2893 env->fptags[7] = 1;
2894 }
2895
2896 void helper_frstor(uint8_t *ptr, int data32)
2897 {
2898 CPU86_LDouble tmp;
2899 int i;
2900
2901 helper_fldenv(ptr, data32);
2902 ptr += (14 << data32);
2903
2904 for(i = 0;i < 8; i++) {
2905 #ifdef USE_X86LDOUBLE
2906 tmp = *(long double *)ptr;
2907 #else
2908 tmp = helper_fldt(ptr);
2909 #endif
2910 ST(i) = tmp;
2911 ptr += 10;
2912 }
2913 }
2914
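/* Memory forms of the environment/state instructions: A0 holds the
   effective address and PARAM1 carries the operand-size flag (data32)
   chosen at translation time. */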
2915 void OPPROTO op_fnstenv_A0(void)
2916 {
2917 helper_fstenv((uint8_t *)A0, PARAM1);
2918 }
2919
2920 void OPPROTO op_fldenv_A0(void)
2921 {
2922 helper_fldenv((uint8_t *)A0, PARAM1);
2923 }
2924
2925 void OPPROTO op_fnsave_A0(void)
2926 {
2927 helper_fsave((uint8_t *)A0, PARAM1);
2928 }
2929
2930 void OPPROTO op_frstor_A0(void)
2931 {
2932 helper_frstor((uint8_t *)A0, PARAM1);
2933 }
2934
2935 /* threading support */
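/* op_lock/op_unlock bracket LOCK-prefixed memory accesses; cpu_lock() and
   cpu_unlock() presumably take a global lock so the access is atomic with
   respect to other emulated threads. */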
2936 void OPPROTO op_lock(void)
2937 {
2938 cpu_lock();
2939 }
2940
2941 void OPPROTO op_unlock(void)
2942 {
2943 cpu_unlock();
2944 }