4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #define CPU_NO_GLOBAL_REGS
22 #include "host-utils.h"
27 #define raise_exception_err(a, b)\
30 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
31 (raise_exception_err)(a, b);\
35 const uint8_t parity_table
[256] = {
36 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
37 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
38 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
39 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
40 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
41 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
42 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
43 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
44 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
45 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
46 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
47 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
48 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
49 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
50 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
51 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
52 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
53 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
54 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
55 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
56 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
57 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
58 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
59 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
60 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
61 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
62 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
63 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
64 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
65 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
66 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
67 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
/* Effective rotate count for 16-bit RCL/RCR: the count rotates through
   16 data bits plus CF, so it is reduced modulo 17. */
const uint8_t rclw_table[32] = {
     0,  1,  2,  3,  4,  5,  6,  7,
     8,  9, 10, 11, 12, 13, 14, 15,
    16,  0,  1,  2,  3,  4,  5,  6,
     7,  8,  9, 10, 11, 12, 13, 14,
};
/* Effective rotate count for 8-bit RCL/RCR: the count rotates through
   8 data bits plus CF, so it is reduced modulo 9. */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
86 const CPU86_LDouble f15rk
[7] =
88 0.00000000000000000000L,
89 1.00000000000000000000L,
90 3.14159265358979323851L, /*pi*/
91 0.30102999566398119523L, /*lg2*/
92 0.69314718055994530943L, /*ln2*/
93 1.44269504088896340739L, /*l2e*/
94 3.32192809488736234781L, /*l2t*/
97 /* broken thread support */
99 spinlock_t global_cpu_lock
= SPIN_LOCK_UNLOCKED
;
101 void helper_lock(void)
103 spin_lock(&global_cpu_lock
);
106 void helper_unlock(void)
108 spin_unlock(&global_cpu_lock
);
111 void helper_write_eflags(target_ulong t0
, uint32_t update_mask
)
113 load_eflags(t0
, update_mask
);
116 target_ulong
helper_read_eflags(void)
119 eflags
= cc_table
[CC_OP
].compute_all();
120 eflags
|= (DF
& DF_MASK
);
121 eflags
|= env
->eflags
& ~(VM_MASK
| RF_MASK
);
125 /* return non zero if error */
126 static inline int load_segment(uint32_t *e1_ptr
, uint32_t *e2_ptr
,
137 index
= selector
& ~7;
138 if ((index
+ 7) > dt
->limit
)
140 ptr
= dt
->base
+ index
;
141 *e1_ptr
= ldl_kernel(ptr
);
142 *e2_ptr
= ldl_kernel(ptr
+ 4);
146 static inline unsigned int get_seg_limit(uint32_t e1
, uint32_t e2
)
149 limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
150 if (e2
& DESC_G_MASK
)
151 limit
= (limit
<< 12) | 0xfff;
/* Reassemble the 32-bit segment base scattered across descriptor words:
   base[15:0] = e1[31:16], base[23:16] = e2[7:0], base[31:24] = e2[31:24]. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
160 static inline void load_seg_cache_raw_dt(SegmentCache
*sc
, uint32_t e1
, uint32_t e2
)
162 sc
->base
= get_seg_base(e1
, e2
);
163 sc
->limit
= get_seg_limit(e1
, e2
);
167 /* init the segment cache in vm86 mode. */
168 static inline void load_seg_vm(int seg
, int selector
)
171 cpu_x86_load_seg_cache(env
, seg
, selector
,
172 (selector
<< 4), 0xffff, 0);
175 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr
,
176 uint32_t *esp_ptr
, int dpl
)
178 int type
, index
, shift
;
183 printf("TR: base=%p limit=%x\n", env
->tr
.base
, env
->tr
.limit
);
184 for(i
=0;i
<env
->tr
.limit
;i
++) {
185 printf("%02x ", env
->tr
.base
[i
]);
186 if ((i
& 7) == 7) printf("\n");
192 if (!(env
->tr
.flags
& DESC_P_MASK
))
193 cpu_abort(env
, "invalid tss");
194 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
196 cpu_abort(env
, "invalid tss type");
198 index
= (dpl
* 4 + 2) << shift
;
199 if (index
+ (4 << shift
) - 1 > env
->tr
.limit
)
200 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
202 *esp_ptr
= lduw_kernel(env
->tr
.base
+ index
);
203 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 2);
205 *esp_ptr
= ldl_kernel(env
->tr
.base
+ index
);
206 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 4);
210 /* XXX: merge with load_seg() */
211 static void tss_load_seg(int seg_reg
, int selector
)
216 if ((selector
& 0xfffc) != 0) {
217 if (load_segment(&e1
, &e2
, selector
) != 0)
218 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
219 if (!(e2
& DESC_S_MASK
))
220 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
222 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
223 cpl
= env
->hflags
& HF_CPL_MASK
;
224 if (seg_reg
== R_CS
) {
225 if (!(e2
& DESC_CS_MASK
))
226 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
227 /* XXX: is it correct ? */
229 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
230 if ((e2
& DESC_C_MASK
) && dpl
> rpl
)
231 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
232 } else if (seg_reg
== R_SS
) {
233 /* SS must be writable data */
234 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
235 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
236 if (dpl
!= cpl
|| dpl
!= rpl
)
237 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
239 /* not readable code */
240 if ((e2
& DESC_CS_MASK
) && !(e2
& DESC_R_MASK
))
241 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
242 /* if data or non conforming code, checks the rights */
243 if (((e2
>> DESC_TYPE_SHIFT
) & 0xf) < 12) {
244 if (dpl
< cpl
|| dpl
< rpl
)
245 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
248 if (!(e2
& DESC_P_MASK
))
249 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
250 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
251 get_seg_base(e1
, e2
),
252 get_seg_limit(e1
, e2
),
255 if (seg_reg
== R_SS
|| seg_reg
== R_CS
)
256 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
260 #define SWITCH_TSS_JMP 0
261 #define SWITCH_TSS_IRET 1
262 #define SWITCH_TSS_CALL 2
264 /* XXX: restore CPU state in registers (PowerPC case) */
265 static void switch_tss(int tss_selector
,
266 uint32_t e1
, uint32_t e2
, int source
,
269 int tss_limit
, tss_limit_max
, type
, old_tss_limit_max
, old_type
, v1
, v2
, i
;
270 target_ulong tss_base
;
271 uint32_t new_regs
[8], new_segs
[6];
272 uint32_t new_eflags
, new_eip
, new_cr3
, new_ldt
, new_trap
;
273 uint32_t old_eflags
, eflags_mask
;
278 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
280 if (loglevel
& CPU_LOG_PCALL
)
281 fprintf(logfile
, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector
, type
, source
);
284 /* if task gate, we read the TSS segment and we load it */
286 if (!(e2
& DESC_P_MASK
))
287 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
288 tss_selector
= e1
>> 16;
289 if (tss_selector
& 4)
290 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
291 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
292 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
293 if (e2
& DESC_S_MASK
)
294 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
295 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
297 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
300 if (!(e2
& DESC_P_MASK
))
301 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
307 tss_limit
= get_seg_limit(e1
, e2
);
308 tss_base
= get_seg_base(e1
, e2
);
309 if ((tss_selector
& 4) != 0 ||
310 tss_limit
< tss_limit_max
)
311 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
312 old_type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
314 old_tss_limit_max
= 103;
316 old_tss_limit_max
= 43;
318 /* read all the registers from the new TSS */
321 new_cr3
= ldl_kernel(tss_base
+ 0x1c);
322 new_eip
= ldl_kernel(tss_base
+ 0x20);
323 new_eflags
= ldl_kernel(tss_base
+ 0x24);
324 for(i
= 0; i
< 8; i
++)
325 new_regs
[i
] = ldl_kernel(tss_base
+ (0x28 + i
* 4));
326 for(i
= 0; i
< 6; i
++)
327 new_segs
[i
] = lduw_kernel(tss_base
+ (0x48 + i
* 4));
328 new_ldt
= lduw_kernel(tss_base
+ 0x60);
329 new_trap
= ldl_kernel(tss_base
+ 0x64);
333 new_eip
= lduw_kernel(tss_base
+ 0x0e);
334 new_eflags
= lduw_kernel(tss_base
+ 0x10);
335 for(i
= 0; i
< 8; i
++)
336 new_regs
[i
] = lduw_kernel(tss_base
+ (0x12 + i
* 2)) | 0xffff0000;
337 for(i
= 0; i
< 4; i
++)
338 new_segs
[i
] = lduw_kernel(tss_base
+ (0x22 + i
* 4));
339 new_ldt
= lduw_kernel(tss_base
+ 0x2a);
345 /* NOTE: we must avoid memory exceptions during the task switch,
346 so we make dummy accesses before */
347 /* XXX: it can still fail in some cases, so a bigger hack is
348 necessary to valid the TLB after having done the accesses */
350 v1
= ldub_kernel(env
->tr
.base
);
351 v2
= ldub_kernel(env
->tr
.base
+ old_tss_limit_max
);
352 stb_kernel(env
->tr
.base
, v1
);
353 stb_kernel(env
->tr
.base
+ old_tss_limit_max
, v2
);
355 /* clear busy bit (it is restartable) */
356 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_IRET
) {
359 ptr
= env
->gdt
.base
+ (env
->tr
.selector
& ~7);
360 e2
= ldl_kernel(ptr
+ 4);
361 e2
&= ~DESC_TSS_BUSY_MASK
;
362 stl_kernel(ptr
+ 4, e2
);
364 old_eflags
= compute_eflags();
365 if (source
== SWITCH_TSS_IRET
)
366 old_eflags
&= ~NT_MASK
;
368 /* save the current state in the old TSS */
371 stl_kernel(env
->tr
.base
+ 0x20, next_eip
);
372 stl_kernel(env
->tr
.base
+ 0x24, old_eflags
);
373 stl_kernel(env
->tr
.base
+ (0x28 + 0 * 4), EAX
);
374 stl_kernel(env
->tr
.base
+ (0x28 + 1 * 4), ECX
);
375 stl_kernel(env
->tr
.base
+ (0x28 + 2 * 4), EDX
);
376 stl_kernel(env
->tr
.base
+ (0x28 + 3 * 4), EBX
);
377 stl_kernel(env
->tr
.base
+ (0x28 + 4 * 4), ESP
);
378 stl_kernel(env
->tr
.base
+ (0x28 + 5 * 4), EBP
);
379 stl_kernel(env
->tr
.base
+ (0x28 + 6 * 4), ESI
);
380 stl_kernel(env
->tr
.base
+ (0x28 + 7 * 4), EDI
);
381 for(i
= 0; i
< 6; i
++)
382 stw_kernel(env
->tr
.base
+ (0x48 + i
* 4), env
->segs
[i
].selector
);
385 stw_kernel(env
->tr
.base
+ 0x0e, next_eip
);
386 stw_kernel(env
->tr
.base
+ 0x10, old_eflags
);
387 stw_kernel(env
->tr
.base
+ (0x12 + 0 * 2), EAX
);
388 stw_kernel(env
->tr
.base
+ (0x12 + 1 * 2), ECX
);
389 stw_kernel(env
->tr
.base
+ (0x12 + 2 * 2), EDX
);
390 stw_kernel(env
->tr
.base
+ (0x12 + 3 * 2), EBX
);
391 stw_kernel(env
->tr
.base
+ (0x12 + 4 * 2), ESP
);
392 stw_kernel(env
->tr
.base
+ (0x12 + 5 * 2), EBP
);
393 stw_kernel(env
->tr
.base
+ (0x12 + 6 * 2), ESI
);
394 stw_kernel(env
->tr
.base
+ (0x12 + 7 * 2), EDI
);
395 for(i
= 0; i
< 4; i
++)
396 stw_kernel(env
->tr
.base
+ (0x22 + i
* 4), env
->segs
[i
].selector
);
399 /* now if an exception occurs, it will occurs in the next task
402 if (source
== SWITCH_TSS_CALL
) {
403 stw_kernel(tss_base
, env
->tr
.selector
);
404 new_eflags
|= NT_MASK
;
408 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_CALL
) {
411 ptr
= env
->gdt
.base
+ (tss_selector
& ~7);
412 e2
= ldl_kernel(ptr
+ 4);
413 e2
|= DESC_TSS_BUSY_MASK
;
414 stl_kernel(ptr
+ 4, e2
);
417 /* set the new CPU state */
418 /* from this point, any exception which occurs can give problems */
419 env
->cr
[0] |= CR0_TS_MASK
;
420 env
->hflags
|= HF_TS_MASK
;
421 env
->tr
.selector
= tss_selector
;
422 env
->tr
.base
= tss_base
;
423 env
->tr
.limit
= tss_limit
;
424 env
->tr
.flags
= e2
& ~DESC_TSS_BUSY_MASK
;
426 if ((type
& 8) && (env
->cr
[0] & CR0_PG_MASK
)) {
427 cpu_x86_update_cr3(env
, new_cr3
);
430 /* load all registers without an exception, then reload them with
431 possible exception */
433 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
|
434 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
;
436 eflags_mask
&= 0xffff;
437 load_eflags(new_eflags
, eflags_mask
);
438 /* XXX: what to do in 16 bit case ? */
447 if (new_eflags
& VM_MASK
) {
448 for(i
= 0; i
< 6; i
++)
449 load_seg_vm(i
, new_segs
[i
]);
450 /* in vm86, CPL is always 3 */
451 cpu_x86_set_cpl(env
, 3);
453 /* CPL is set the RPL of CS */
454 cpu_x86_set_cpl(env
, new_segs
[R_CS
] & 3);
455 /* first just selectors as the rest may trigger exceptions */
456 for(i
= 0; i
< 6; i
++)
457 cpu_x86_load_seg_cache(env
, i
, new_segs
[i
], 0, 0, 0);
460 env
->ldt
.selector
= new_ldt
& ~4;
467 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
469 if ((new_ldt
& 0xfffc) != 0) {
471 index
= new_ldt
& ~7;
472 if ((index
+ 7) > dt
->limit
)
473 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
474 ptr
= dt
->base
+ index
;
475 e1
= ldl_kernel(ptr
);
476 e2
= ldl_kernel(ptr
+ 4);
477 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
478 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
479 if (!(e2
& DESC_P_MASK
))
480 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
481 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
484 /* load the segments */
485 if (!(new_eflags
& VM_MASK
)) {
486 tss_load_seg(R_CS
, new_segs
[R_CS
]);
487 tss_load_seg(R_SS
, new_segs
[R_SS
]);
488 tss_load_seg(R_ES
, new_segs
[R_ES
]);
489 tss_load_seg(R_DS
, new_segs
[R_DS
]);
490 tss_load_seg(R_FS
, new_segs
[R_FS
]);
491 tss_load_seg(R_GS
, new_segs
[R_GS
]);
494 /* check that EIP is in the CS segment limits */
495 if (new_eip
> env
->segs
[R_CS
].limit
) {
496 /* XXX: different exception if CALL ? */
497 raise_exception_err(EXCP0D_GPF
, 0);
501 /* check if Port I/O is allowed in TSS */
502 static inline void check_io(int addr
, int size
)
504 int io_offset
, val
, mask
;
506 /* TSS must be a valid 32 bit one */
507 if (!(env
->tr
.flags
& DESC_P_MASK
) ||
508 ((env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf) != 9 ||
511 io_offset
= lduw_kernel(env
->tr
.base
+ 0x66);
512 io_offset
+= (addr
>> 3);
513 /* Note: the check needs two bytes */
514 if ((io_offset
+ 1) > env
->tr
.limit
)
516 val
= lduw_kernel(env
->tr
.base
+ io_offset
);
518 mask
= (1 << size
) - 1;
519 /* all bits must be zero to allow the I/O */
520 if ((val
& mask
) != 0) {
522 raise_exception_err(EXCP0D_GPF
, 0);
526 void helper_check_iob(uint32_t t0
)
531 void helper_check_iow(uint32_t t0
)
536 void helper_check_iol(uint32_t t0
)
541 void helper_outb(uint32_t port
, uint32_t data
)
543 cpu_outb(env
, port
, data
& 0xff);
546 target_ulong
helper_inb(uint32_t port
)
548 return cpu_inb(env
, port
);
551 void helper_outw(uint32_t port
, uint32_t data
)
553 cpu_outw(env
, port
, data
& 0xffff);
556 target_ulong
helper_inw(uint32_t port
)
558 return cpu_inw(env
, port
);
561 void helper_outl(uint32_t port
, uint32_t data
)
563 cpu_outl(env
, port
, data
);
566 target_ulong
helper_inl(uint32_t port
)
568 return cpu_inl(env
, port
);
571 static inline unsigned int get_sp_mask(unsigned int e2
)
573 if (e2
& DESC_B_MASK
)
580 #define SET_ESP(val, sp_mask)\
582 if ((sp_mask) == 0xffff)\
583 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
584 else if ((sp_mask) == 0xffffffffLL)\
585 ESP = (uint32_t)(val);\
590 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
593 /* XXX: add a is_user flag to have proper security support */
594 #define PUSHW(ssp, sp, sp_mask, val)\
597 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
600 #define PUSHL(ssp, sp, sp_mask, val)\
603 stl_kernel((ssp) + (sp & (sp_mask)), (val));\
606 #define POPW(ssp, sp, sp_mask, val)\
608 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
612 #define POPL(ssp, sp, sp_mask, val)\
614 val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
618 /* protected mode interrupt */
619 static void do_interrupt_protected(int intno
, int is_int
, int error_code
,
620 unsigned int next_eip
, int is_hw
)
623 target_ulong ptr
, ssp
;
624 int type
, dpl
, selector
, ss_dpl
, cpl
;
625 int has_error_code
, new_stack
, shift
;
626 uint32_t e1
, e2
, offset
, ss
, esp
, ss_e1
, ss_e2
;
627 uint32_t old_eip
, sp_mask
;
628 int svm_should_check
= 1;
630 if ((env
->intercept
& INTERCEPT_SVM_MASK
) && !is_int
&& next_eip
==-1) {
632 svm_should_check
= 0;
636 && (INTERCEPTEDl(_exceptions
, 1 << intno
)
638 raise_interrupt(intno
, is_int
, error_code
, 0);
641 if (!is_int
&& !is_hw
) {
660 if (intno
* 8 + 7 > dt
->limit
)
661 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
662 ptr
= dt
->base
+ intno
* 8;
663 e1
= ldl_kernel(ptr
);
664 e2
= ldl_kernel(ptr
+ 4);
665 /* check gate type */
666 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
668 case 5: /* task gate */
669 /* must do that check here to return the correct error code */
670 if (!(e2
& DESC_P_MASK
))
671 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
672 switch_tss(intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
673 if (has_error_code
) {
676 /* push the error code */
677 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
679 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
)
683 esp
= (ESP
- (2 << shift
)) & mask
;
684 ssp
= env
->segs
[R_SS
].base
+ esp
;
686 stl_kernel(ssp
, error_code
);
688 stw_kernel(ssp
, error_code
);
692 case 6: /* 286 interrupt gate */
693 case 7: /* 286 trap gate */
694 case 14: /* 386 interrupt gate */
695 case 15: /* 386 trap gate */
698 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
701 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
702 cpl
= env
->hflags
& HF_CPL_MASK
;
703 /* check privledge if software int */
704 if (is_int
&& dpl
< cpl
)
705 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
706 /* check valid bit */
707 if (!(e2
& DESC_P_MASK
))
708 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
710 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
711 if ((selector
& 0xfffc) == 0)
712 raise_exception_err(EXCP0D_GPF
, 0);
714 if (load_segment(&e1
, &e2
, selector
) != 0)
715 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
716 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
717 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
718 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
720 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
721 if (!(e2
& DESC_P_MASK
))
722 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
723 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
724 /* to inner privilege */
725 get_ss_esp_from_tss(&ss
, &esp
, dpl
);
726 if ((ss
& 0xfffc) == 0)
727 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
729 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
730 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
731 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
732 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
734 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
735 if (!(ss_e2
& DESC_S_MASK
) ||
736 (ss_e2
& DESC_CS_MASK
) ||
737 !(ss_e2
& DESC_W_MASK
))
738 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
739 if (!(ss_e2
& DESC_P_MASK
))
740 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
742 sp_mask
= get_sp_mask(ss_e2
);
743 ssp
= get_seg_base(ss_e1
, ss_e2
);
744 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
745 /* to same privilege */
746 if (env
->eflags
& VM_MASK
)
747 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
749 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
750 ssp
= env
->segs
[R_SS
].base
;
754 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
755 new_stack
= 0; /* avoid warning */
756 sp_mask
= 0; /* avoid warning */
757 ssp
= 0; /* avoid warning */
758 esp
= 0; /* avoid warning */
764 /* XXX: check that enough room is available */
765 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
766 if (env
->eflags
& VM_MASK
)
772 if (env
->eflags
& VM_MASK
) {
773 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
774 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
775 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
776 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
778 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
779 PUSHL(ssp
, esp
, sp_mask
, ESP
);
781 PUSHL(ssp
, esp
, sp_mask
, compute_eflags());
782 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
783 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
784 if (has_error_code
) {
785 PUSHL(ssp
, esp
, sp_mask
, error_code
);
789 if (env
->eflags
& VM_MASK
) {
790 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
791 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
792 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
793 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
795 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
796 PUSHW(ssp
, esp
, sp_mask
, ESP
);
798 PUSHW(ssp
, esp
, sp_mask
, compute_eflags());
799 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
800 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
801 if (has_error_code
) {
802 PUSHW(ssp
, esp
, sp_mask
, error_code
);
807 if (env
->eflags
& VM_MASK
) {
808 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
809 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
810 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
811 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
813 ss
= (ss
& ~3) | dpl
;
814 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
815 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
817 SET_ESP(esp
, sp_mask
);
819 selector
= (selector
& ~3) | dpl
;
820 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
821 get_seg_base(e1
, e2
),
822 get_seg_limit(e1
, e2
),
824 cpu_x86_set_cpl(env
, dpl
);
827 /* interrupt gate clear IF mask */
828 if ((type
& 1) == 0) {
829 env
->eflags
&= ~IF_MASK
;
831 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
836 #define PUSHQ(sp, val)\
839 stq_kernel(sp, (val));\
842 #define POPQ(sp, val)\
844 val = ldq_kernel(sp);\
848 static inline target_ulong
get_rsp_from_tss(int level
)
853 printf("TR: base=" TARGET_FMT_lx
" limit=%x\n",
854 env
->tr
.base
, env
->tr
.limit
);
857 if (!(env
->tr
.flags
& DESC_P_MASK
))
858 cpu_abort(env
, "invalid tss");
859 index
= 8 * level
+ 4;
860 if ((index
+ 7) > env
->tr
.limit
)
861 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
862 return ldq_kernel(env
->tr
.base
+ index
);
865 /* 64 bit interrupt */
866 static void do_interrupt64(int intno
, int is_int
, int error_code
,
867 target_ulong next_eip
, int is_hw
)
871 int type
, dpl
, selector
, cpl
, ist
;
872 int has_error_code
, new_stack
;
873 uint32_t e1
, e2
, e3
, ss
;
874 target_ulong old_eip
, esp
, offset
;
875 int svm_should_check
= 1;
877 if ((env
->intercept
& INTERCEPT_SVM_MASK
) && !is_int
&& next_eip
==-1) {
879 svm_should_check
= 0;
882 && INTERCEPTEDl(_exceptions
, 1 << intno
)
884 raise_interrupt(intno
, is_int
, error_code
, 0);
887 if (!is_int
&& !is_hw
) {
906 if (intno
* 16 + 15 > dt
->limit
)
907 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
908 ptr
= dt
->base
+ intno
* 16;
909 e1
= ldl_kernel(ptr
);
910 e2
= ldl_kernel(ptr
+ 4);
911 e3
= ldl_kernel(ptr
+ 8);
912 /* check gate type */
913 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
915 case 14: /* 386 interrupt gate */
916 case 15: /* 386 trap gate */
919 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
922 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
923 cpl
= env
->hflags
& HF_CPL_MASK
;
924 /* check privledge if software int */
925 if (is_int
&& dpl
< cpl
)
926 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
927 /* check valid bit */
928 if (!(e2
& DESC_P_MASK
))
929 raise_exception_err(EXCP0B_NOSEG
, intno
* 16 + 2);
931 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
933 if ((selector
& 0xfffc) == 0)
934 raise_exception_err(EXCP0D_GPF
, 0);
936 if (load_segment(&e1
, &e2
, selector
) != 0)
937 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
938 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
939 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
940 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
942 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
943 if (!(e2
& DESC_P_MASK
))
944 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
945 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
))
946 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
947 if ((!(e2
& DESC_C_MASK
) && dpl
< cpl
) || ist
!= 0) {
948 /* to inner privilege */
950 esp
= get_rsp_from_tss(ist
+ 3);
952 esp
= get_rsp_from_tss(dpl
);
953 esp
&= ~0xfLL
; /* align stack */
956 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
957 /* to same privilege */
958 if (env
->eflags
& VM_MASK
)
959 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
962 esp
= get_rsp_from_tss(ist
+ 3);
965 esp
&= ~0xfLL
; /* align stack */
968 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
969 new_stack
= 0; /* avoid warning */
970 esp
= 0; /* avoid warning */
973 PUSHQ(esp
, env
->segs
[R_SS
].selector
);
975 PUSHQ(esp
, compute_eflags());
976 PUSHQ(esp
, env
->segs
[R_CS
].selector
);
978 if (has_error_code
) {
979 PUSHQ(esp
, error_code
);
984 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
988 selector
= (selector
& ~3) | dpl
;
989 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
990 get_seg_base(e1
, e2
),
991 get_seg_limit(e1
, e2
),
993 cpu_x86_set_cpl(env
, dpl
);
996 /* interrupt gate clear IF mask */
997 if ((type
& 1) == 0) {
998 env
->eflags
&= ~IF_MASK
;
1000 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
1004 #if defined(CONFIG_USER_ONLY)
1005 void helper_syscall(int next_eip_addend
)
1007 env
->exception_index
= EXCP_SYSCALL
;
1008 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
1012 void helper_syscall(int next_eip_addend
)
1016 if (!(env
->efer
& MSR_EFER_SCE
)) {
1017 raise_exception_err(EXCP06_ILLOP
, 0);
1019 selector
= (env
->star
>> 32) & 0xffff;
1020 #ifdef TARGET_X86_64
1021 if (env
->hflags
& HF_LMA_MASK
) {
1024 ECX
= env
->eip
+ next_eip_addend
;
1025 env
->regs
[11] = compute_eflags();
1027 code64
= env
->hflags
& HF_CS64_MASK
;
1029 cpu_x86_set_cpl(env
, 0);
1030 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1032 DESC_G_MASK
| DESC_P_MASK
|
1034 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
1035 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1037 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1039 DESC_W_MASK
| DESC_A_MASK
);
1040 env
->eflags
&= ~env
->fmask
;
1041 load_eflags(env
->eflags
, 0);
1043 env
->eip
= env
->lstar
;
1045 env
->eip
= env
->cstar
;
1049 ECX
= (uint32_t)(env
->eip
+ next_eip_addend
);
1051 cpu_x86_set_cpl(env
, 0);
1052 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1054 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1056 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1057 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1059 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1061 DESC_W_MASK
| DESC_A_MASK
);
1062 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
1063 env
->eip
= (uint32_t)env
->star
;
1068 void helper_sysret(int dflag
)
1072 if (!(env
->efer
& MSR_EFER_SCE
)) {
1073 raise_exception_err(EXCP06_ILLOP
, 0);
1075 cpl
= env
->hflags
& HF_CPL_MASK
;
1076 if (!(env
->cr
[0] & CR0_PE_MASK
) || cpl
!= 0) {
1077 raise_exception_err(EXCP0D_GPF
, 0);
1079 selector
= (env
->star
>> 48) & 0xffff;
1080 #ifdef TARGET_X86_64
1081 if (env
->hflags
& HF_LMA_MASK
) {
1083 cpu_x86_load_seg_cache(env
, R_CS
, (selector
+ 16) | 3,
1085 DESC_G_MASK
| DESC_P_MASK
|
1086 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1087 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1091 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1093 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1094 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1095 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1096 env
->eip
= (uint32_t)ECX
;
1098 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1100 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1101 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1102 DESC_W_MASK
| DESC_A_MASK
);
1103 load_eflags((uint32_t)(env
->regs
[11]), TF_MASK
| AC_MASK
| ID_MASK
|
1104 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
1105 cpu_x86_set_cpl(env
, 3);
1109 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1111 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1112 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1113 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1114 env
->eip
= (uint32_t)ECX
;
1115 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1117 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1118 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1119 DESC_W_MASK
| DESC_A_MASK
);
1120 env
->eflags
|= IF_MASK
;
1121 cpu_x86_set_cpl(env
, 3);
1124 if (kqemu_is_ok(env
)) {
1125 if (env
->hflags
& HF_LMA_MASK
)
1126 CC_OP
= CC_OP_EFLAGS
;
1127 env
->exception_index
= -1;
1133 /* real mode interrupt */
1134 static void do_interrupt_real(int intno
, int is_int
, int error_code
,
1135 unsigned int next_eip
)
1138 target_ulong ptr
, ssp
;
1140 uint32_t offset
, esp
;
1141 uint32_t old_cs
, old_eip
;
1142 int svm_should_check
= 1;
1144 if ((env
->intercept
& INTERCEPT_SVM_MASK
) && !is_int
&& next_eip
==-1) {
1146 svm_should_check
= 0;
1148 if (svm_should_check
1149 && INTERCEPTEDl(_exceptions
, 1 << intno
)
1151 raise_interrupt(intno
, is_int
, error_code
, 0);
1153 /* real mode (simpler !) */
1155 if (intno
* 4 + 3 > dt
->limit
)
1156 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
1157 ptr
= dt
->base
+ intno
* 4;
1158 offset
= lduw_kernel(ptr
);
1159 selector
= lduw_kernel(ptr
+ 2);
1161 ssp
= env
->segs
[R_SS
].base
;
1166 old_cs
= env
->segs
[R_CS
].selector
;
1167 /* XXX: use SS segment size ? */
1168 PUSHW(ssp
, esp
, 0xffff, compute_eflags());
1169 PUSHW(ssp
, esp
, 0xffff, old_cs
);
1170 PUSHW(ssp
, esp
, 0xffff, old_eip
);
1172 /* update processor state */
1173 ESP
= (ESP
& ~0xffff) | (esp
& 0xffff);
1175 env
->segs
[R_CS
].selector
= selector
;
1176 env
->segs
[R_CS
].base
= (selector
<< 4);
1177 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
1180 /* fake user mode interrupt */
1181 void do_interrupt_user(int intno
, int is_int
, int error_code
,
1182 target_ulong next_eip
)
1186 int dpl
, cpl
, shift
;
1190 if (env
->hflags
& HF_LMA_MASK
) {
1195 ptr
= dt
->base
+ (intno
<< shift
);
1196 e2
= ldl_kernel(ptr
+ 4);
1198 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1199 cpl
= env
->hflags
& HF_CPL_MASK
;
1200 /* check privledge if software int */
1201 if (is_int
&& dpl
< cpl
)
1202 raise_exception_err(EXCP0D_GPF
, (intno
<< shift
) + 2);
1204 /* Since we emulate only user space, we cannot do more than
1205 exiting the emulation with the suitable exception and error
1212 * Begin execution of an interruption. is_int is TRUE if coming from
1213 * the int instruction. next_eip is the EIP value AFTER the interrupt
1214 * instruction. It is only relevant if is_int is TRUE.
1216 void do_interrupt(int intno
, int is_int
, int error_code
,
1217 target_ulong next_eip
, int is_hw
)
1219 if (loglevel
& CPU_LOG_INT
) {
1220 if ((env
->cr
[0] & CR0_PE_MASK
)) {
1222 fprintf(logfile
, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
" pc=" TARGET_FMT_lx
" SP=%04x:" TARGET_FMT_lx
,
1223 count
, intno
, error_code
, is_int
,
1224 env
->hflags
& HF_CPL_MASK
,
1225 env
->segs
[R_CS
].selector
, EIP
,
1226 (int)env
->segs
[R_CS
].base
+ EIP
,
1227 env
->segs
[R_SS
].selector
, ESP
);
1228 if (intno
== 0x0e) {
1229 fprintf(logfile
, " CR2=" TARGET_FMT_lx
, env
->cr
[2]);
1231 fprintf(logfile
, " EAX=" TARGET_FMT_lx
, EAX
);
1233 fprintf(logfile
, "\n");
1234 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
1239 fprintf(logfile
, " code=");
1240 ptr
= env
->segs
[R_CS
].base
+ env
->eip
;
1241 for(i
= 0; i
< 16; i
++) {
1242 fprintf(logfile
, " %02x", ldub(ptr
+ i
));
1244 fprintf(logfile
, "\n");
1250 if (env
->cr
[0] & CR0_PE_MASK
) {
1252 if (env
->hflags
& HF_LMA_MASK
) {
1253 do_interrupt64(intno
, is_int
, error_code
, next_eip
, is_hw
);
1257 do_interrupt_protected(intno
, is_int
, error_code
, next_eip
, is_hw
);
1260 do_interrupt_real(intno
, is_int
, error_code
, next_eip
);
1265 * Check nested exceptions and change to double or triple fault if
1266 * needed. It should only be called, if this is not an interrupt.
1267 * Returns the new exception number.
1269 static int check_exception(int intno
, int *error_code
)
1271 int first_contributory
= env
->old_exception
== 0 ||
1272 (env
->old_exception
>= 10 &&
1273 env
->old_exception
<= 13);
1274 int second_contributory
= intno
== 0 ||
1275 (intno
>= 10 && intno
<= 13);
1277 if (loglevel
& CPU_LOG_INT
)
1278 fprintf(logfile
, "check_exception old: 0x%x new 0x%x\n",
1279 env
->old_exception
, intno
);
1281 if (env
->old_exception
== EXCP08_DBLE
)
1282 cpu_abort(env
, "triple fault");
1284 if ((first_contributory
&& second_contributory
)
1285 || (env
->old_exception
== EXCP0E_PAGE
&&
1286 (second_contributory
|| (intno
== EXCP0E_PAGE
)))) {
1287 intno
= EXCP08_DBLE
;
1291 if (second_contributory
|| (intno
== EXCP0E_PAGE
) ||
1292 (intno
== EXCP08_DBLE
))
1293 env
->old_exception
= intno
;
1299 * Signal an interruption. It is executed in the main CPU loop.
1300 * is_int is TRUE if coming from the int instruction. next_eip is the
1301 * EIP value AFTER the interrupt instruction. It is only relevant if
1304 void raise_interrupt(int intno
, int is_int
, int error_code
,
1305 int next_eip_addend
)
1308 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE
+ intno
, error_code
);
1309 intno
= check_exception(intno
, &error_code
);
1312 env
->exception_index
= intno
;
1313 env
->error_code
= error_code
;
1314 env
->exception_is_int
= is_int
;
1315 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
1319 /* same as raise_exception_err, but do not restore global registers */
1320 static void raise_exception_err_norestore(int exception_index
, int error_code
)
1322 exception_index
= check_exception(exception_index
, &error_code
);
1324 env
->exception_index
= exception_index
;
1325 env
->error_code
= error_code
;
1326 env
->exception_is_int
= 0;
1327 env
->exception_next_eip
= 0;
1328 longjmp(env
->jmp_env
, 1);
1331 /* shortcuts to generate exceptions */
1333 void (raise_exception_err
)(int exception_index
, int error_code
)
1335 raise_interrupt(exception_index
, 0, error_code
, 0);
1338 void raise_exception(int exception_index
)
1340 raise_interrupt(exception_index
, 0, 0, 0);
1345 #if defined(CONFIG_USER_ONLY)
1347 void do_smm_enter(void)
1351 void helper_rsm(void)
1357 #ifdef TARGET_X86_64
1358 #define SMM_REVISION_ID 0x00020064
1360 #define SMM_REVISION_ID 0x00020000
1363 void do_smm_enter(void)
1365 target_ulong sm_state
;
1369 if (loglevel
& CPU_LOG_INT
) {
1370 fprintf(logfile
, "SMM: enter\n");
1371 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
1374 env
->hflags
|= HF_SMM_MASK
;
1375 cpu_smm_update(env
);
1377 sm_state
= env
->smbase
+ 0x8000;
1379 #ifdef TARGET_X86_64
1380 for(i
= 0; i
< 6; i
++) {
1382 offset
= 0x7e00 + i
* 16;
1383 stw_phys(sm_state
+ offset
, dt
->selector
);
1384 stw_phys(sm_state
+ offset
+ 2, (dt
->flags
>> 8) & 0xf0ff);
1385 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1386 stq_phys(sm_state
+ offset
+ 8, dt
->base
);
1389 stq_phys(sm_state
+ 0x7e68, env
->gdt
.base
);
1390 stl_phys(sm_state
+ 0x7e64, env
->gdt
.limit
);
1392 stw_phys(sm_state
+ 0x7e70, env
->ldt
.selector
);
1393 stq_phys(sm_state
+ 0x7e78, env
->ldt
.base
);
1394 stl_phys(sm_state
+ 0x7e74, env
->ldt
.limit
);
1395 stw_phys(sm_state
+ 0x7e72, (env
->ldt
.flags
>> 8) & 0xf0ff);
1397 stq_phys(sm_state
+ 0x7e88, env
->idt
.base
);
1398 stl_phys(sm_state
+ 0x7e84, env
->idt
.limit
);
1400 stw_phys(sm_state
+ 0x7e90, env
->tr
.selector
);
1401 stq_phys(sm_state
+ 0x7e98, env
->tr
.base
);
1402 stl_phys(sm_state
+ 0x7e94, env
->tr
.limit
);
1403 stw_phys(sm_state
+ 0x7e92, (env
->tr
.flags
>> 8) & 0xf0ff);
1405 stq_phys(sm_state
+ 0x7ed0, env
->efer
);
1407 stq_phys(sm_state
+ 0x7ff8, EAX
);
1408 stq_phys(sm_state
+ 0x7ff0, ECX
);
1409 stq_phys(sm_state
+ 0x7fe8, EDX
);
1410 stq_phys(sm_state
+ 0x7fe0, EBX
);
1411 stq_phys(sm_state
+ 0x7fd8, ESP
);
1412 stq_phys(sm_state
+ 0x7fd0, EBP
);
1413 stq_phys(sm_state
+ 0x7fc8, ESI
);
1414 stq_phys(sm_state
+ 0x7fc0, EDI
);
1415 for(i
= 8; i
< 16; i
++)
1416 stq_phys(sm_state
+ 0x7ff8 - i
* 8, env
->regs
[i
]);
1417 stq_phys(sm_state
+ 0x7f78, env
->eip
);
1418 stl_phys(sm_state
+ 0x7f70, compute_eflags());
1419 stl_phys(sm_state
+ 0x7f68, env
->dr
[6]);
1420 stl_phys(sm_state
+ 0x7f60, env
->dr
[7]);
1422 stl_phys(sm_state
+ 0x7f48, env
->cr
[4]);
1423 stl_phys(sm_state
+ 0x7f50, env
->cr
[3]);
1424 stl_phys(sm_state
+ 0x7f58, env
->cr
[0]);
1426 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1427 stl_phys(sm_state
+ 0x7f00, env
->smbase
);
1429 stl_phys(sm_state
+ 0x7ffc, env
->cr
[0]);
1430 stl_phys(sm_state
+ 0x7ff8, env
->cr
[3]);
1431 stl_phys(sm_state
+ 0x7ff4, compute_eflags());
1432 stl_phys(sm_state
+ 0x7ff0, env
->eip
);
1433 stl_phys(sm_state
+ 0x7fec, EDI
);
1434 stl_phys(sm_state
+ 0x7fe8, ESI
);
1435 stl_phys(sm_state
+ 0x7fe4, EBP
);
1436 stl_phys(sm_state
+ 0x7fe0, ESP
);
1437 stl_phys(sm_state
+ 0x7fdc, EBX
);
1438 stl_phys(sm_state
+ 0x7fd8, EDX
);
1439 stl_phys(sm_state
+ 0x7fd4, ECX
);
1440 stl_phys(sm_state
+ 0x7fd0, EAX
);
1441 stl_phys(sm_state
+ 0x7fcc, env
->dr
[6]);
1442 stl_phys(sm_state
+ 0x7fc8, env
->dr
[7]);
1444 stl_phys(sm_state
+ 0x7fc4, env
->tr
.selector
);
1445 stl_phys(sm_state
+ 0x7f64, env
->tr
.base
);
1446 stl_phys(sm_state
+ 0x7f60, env
->tr
.limit
);
1447 stl_phys(sm_state
+ 0x7f5c, (env
->tr
.flags
>> 8) & 0xf0ff);
1449 stl_phys(sm_state
+ 0x7fc0, env
->ldt
.selector
);
1450 stl_phys(sm_state
+ 0x7f80, env
->ldt
.base
);
1451 stl_phys(sm_state
+ 0x7f7c, env
->ldt
.limit
);
1452 stl_phys(sm_state
+ 0x7f78, (env
->ldt
.flags
>> 8) & 0xf0ff);
1454 stl_phys(sm_state
+ 0x7f74, env
->gdt
.base
);
1455 stl_phys(sm_state
+ 0x7f70, env
->gdt
.limit
);
1457 stl_phys(sm_state
+ 0x7f58, env
->idt
.base
);
1458 stl_phys(sm_state
+ 0x7f54, env
->idt
.limit
);
1460 for(i
= 0; i
< 6; i
++) {
1463 offset
= 0x7f84 + i
* 12;
1465 offset
= 0x7f2c + (i
- 3) * 12;
1466 stl_phys(sm_state
+ 0x7fa8 + i
* 4, dt
->selector
);
1467 stl_phys(sm_state
+ offset
+ 8, dt
->base
);
1468 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1469 stl_phys(sm_state
+ offset
, (dt
->flags
>> 8) & 0xf0ff);
1471 stl_phys(sm_state
+ 0x7f14, env
->cr
[4]);
1473 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1474 stl_phys(sm_state
+ 0x7ef8, env
->smbase
);
1476 /* init SMM cpu state */
1478 #ifdef TARGET_X86_64
1480 env
->hflags
&= ~HF_LMA_MASK
;
1482 load_eflags(0, ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1483 env
->eip
= 0x00008000;
1484 cpu_x86_load_seg_cache(env
, R_CS
, (env
->smbase
>> 4) & 0xffff, env
->smbase
,
1486 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0xffffffff, 0);
1487 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0xffffffff, 0);
1488 cpu_x86_load_seg_cache(env
, R_SS
, 0, 0, 0xffffffff, 0);
1489 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0xffffffff, 0);
1490 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0xffffffff, 0);
1492 cpu_x86_update_cr0(env
,
1493 env
->cr
[0] & ~(CR0_PE_MASK
| CR0_EM_MASK
| CR0_TS_MASK
| CR0_PG_MASK
));
1494 cpu_x86_update_cr4(env
, 0);
1495 env
->dr
[7] = 0x00000400;
1496 CC_OP
= CC_OP_EFLAGS
;
1499 void helper_rsm(void)
1501 target_ulong sm_state
;
1505 sm_state
= env
->smbase
+ 0x8000;
1506 #ifdef TARGET_X86_64
1507 env
->efer
= ldq_phys(sm_state
+ 0x7ed0);
1508 if (env
->efer
& MSR_EFER_LMA
)
1509 env
->hflags
|= HF_LMA_MASK
;
1511 env
->hflags
&= ~HF_LMA_MASK
;
1513 for(i
= 0; i
< 6; i
++) {
1514 offset
= 0x7e00 + i
* 16;
1515 cpu_x86_load_seg_cache(env
, i
,
1516 lduw_phys(sm_state
+ offset
),
1517 ldq_phys(sm_state
+ offset
+ 8),
1518 ldl_phys(sm_state
+ offset
+ 4),
1519 (lduw_phys(sm_state
+ offset
+ 2) & 0xf0ff) << 8);
1522 env
->gdt
.base
= ldq_phys(sm_state
+ 0x7e68);
1523 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7e64);
1525 env
->ldt
.selector
= lduw_phys(sm_state
+ 0x7e70);
1526 env
->ldt
.base
= ldq_phys(sm_state
+ 0x7e78);
1527 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7e74);
1528 env
->ldt
.flags
= (lduw_phys(sm_state
+ 0x7e72) & 0xf0ff) << 8;
1530 env
->idt
.base
= ldq_phys(sm_state
+ 0x7e88);
1531 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7e84);
1533 env
->tr
.selector
= lduw_phys(sm_state
+ 0x7e90);
1534 env
->tr
.base
= ldq_phys(sm_state
+ 0x7e98);
1535 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7e94);
1536 env
->tr
.flags
= (lduw_phys(sm_state
+ 0x7e92) & 0xf0ff) << 8;
1538 EAX
= ldq_phys(sm_state
+ 0x7ff8);
1539 ECX
= ldq_phys(sm_state
+ 0x7ff0);
1540 EDX
= ldq_phys(sm_state
+ 0x7fe8);
1541 EBX
= ldq_phys(sm_state
+ 0x7fe0);
1542 ESP
= ldq_phys(sm_state
+ 0x7fd8);
1543 EBP
= ldq_phys(sm_state
+ 0x7fd0);
1544 ESI
= ldq_phys(sm_state
+ 0x7fc8);
1545 EDI
= ldq_phys(sm_state
+ 0x7fc0);
1546 for(i
= 8; i
< 16; i
++)
1547 env
->regs
[i
] = ldq_phys(sm_state
+ 0x7ff8 - i
* 8);
1548 env
->eip
= ldq_phys(sm_state
+ 0x7f78);
1549 load_eflags(ldl_phys(sm_state
+ 0x7f70),
1550 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1551 env
->dr
[6] = ldl_phys(sm_state
+ 0x7f68);
1552 env
->dr
[7] = ldl_phys(sm_state
+ 0x7f60);
1554 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f48));
1555 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7f50));
1556 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7f58));
1558 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1559 if (val
& 0x20000) {
1560 env
->smbase
= ldl_phys(sm_state
+ 0x7f00) & ~0x7fff;
1563 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7ffc));
1564 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7ff8));
1565 load_eflags(ldl_phys(sm_state
+ 0x7ff4),
1566 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1567 env
->eip
= ldl_phys(sm_state
+ 0x7ff0);
1568 EDI
= ldl_phys(sm_state
+ 0x7fec);
1569 ESI
= ldl_phys(sm_state
+ 0x7fe8);
1570 EBP
= ldl_phys(sm_state
+ 0x7fe4);
1571 ESP
= ldl_phys(sm_state
+ 0x7fe0);
1572 EBX
= ldl_phys(sm_state
+ 0x7fdc);
1573 EDX
= ldl_phys(sm_state
+ 0x7fd8);
1574 ECX
= ldl_phys(sm_state
+ 0x7fd4);
1575 EAX
= ldl_phys(sm_state
+ 0x7fd0);
1576 env
->dr
[6] = ldl_phys(sm_state
+ 0x7fcc);
1577 env
->dr
[7] = ldl_phys(sm_state
+ 0x7fc8);
1579 env
->tr
.selector
= ldl_phys(sm_state
+ 0x7fc4) & 0xffff;
1580 env
->tr
.base
= ldl_phys(sm_state
+ 0x7f64);
1581 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7f60);
1582 env
->tr
.flags
= (ldl_phys(sm_state
+ 0x7f5c) & 0xf0ff) << 8;
1584 env
->ldt
.selector
= ldl_phys(sm_state
+ 0x7fc0) & 0xffff;
1585 env
->ldt
.base
= ldl_phys(sm_state
+ 0x7f80);
1586 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7f7c);
1587 env
->ldt
.flags
= (ldl_phys(sm_state
+ 0x7f78) & 0xf0ff) << 8;
1589 env
->gdt
.base
= ldl_phys(sm_state
+ 0x7f74);
1590 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7f70);
1592 env
->idt
.base
= ldl_phys(sm_state
+ 0x7f58);
1593 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7f54);
1595 for(i
= 0; i
< 6; i
++) {
1597 offset
= 0x7f84 + i
* 12;
1599 offset
= 0x7f2c + (i
- 3) * 12;
1600 cpu_x86_load_seg_cache(env
, i
,
1601 ldl_phys(sm_state
+ 0x7fa8 + i
* 4) & 0xffff,
1602 ldl_phys(sm_state
+ offset
+ 8),
1603 ldl_phys(sm_state
+ offset
+ 4),
1604 (ldl_phys(sm_state
+ offset
) & 0xf0ff) << 8);
1606 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f14));
1608 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1609 if (val
& 0x20000) {
1610 env
->smbase
= ldl_phys(sm_state
+ 0x7ef8) & ~0x7fff;
1613 CC_OP
= CC_OP_EFLAGS
;
1614 env
->hflags
&= ~HF_SMM_MASK
;
1615 cpu_smm_update(env
);
1617 if (loglevel
& CPU_LOG_INT
) {
1618 fprintf(logfile
, "SMM: after RSM\n");
1619 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
1623 #endif /* !CONFIG_USER_ONLY */
1626 /* division, flags are undefined */
1628 void helper_divb_AL(target_ulong t0
)
1630 unsigned int num
, den
, q
, r
;
1632 num
= (EAX
& 0xffff);
1635 raise_exception(EXCP00_DIVZ
);
1639 raise_exception(EXCP00_DIVZ
);
1641 r
= (num
% den
) & 0xff;
1642 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
1645 void helper_idivb_AL(target_ulong t0
)
1652 raise_exception(EXCP00_DIVZ
);
1656 raise_exception(EXCP00_DIVZ
);
1658 r
= (num
% den
) & 0xff;
1659 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
1662 void helper_divw_AX(target_ulong t0
)
1664 unsigned int num
, den
, q
, r
;
1666 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1667 den
= (t0
& 0xffff);
1669 raise_exception(EXCP00_DIVZ
);
1673 raise_exception(EXCP00_DIVZ
);
1675 r
= (num
% den
) & 0xffff;
1676 EAX
= (EAX
& ~0xffff) | q
;
1677 EDX
= (EDX
& ~0xffff) | r
;
1680 void helper_idivw_AX(target_ulong t0
)
1684 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1687 raise_exception(EXCP00_DIVZ
);
1690 if (q
!= (int16_t)q
)
1691 raise_exception(EXCP00_DIVZ
);
1693 r
= (num
% den
) & 0xffff;
1694 EAX
= (EAX
& ~0xffff) | q
;
1695 EDX
= (EDX
& ~0xffff) | r
;
1698 void helper_divl_EAX(target_ulong t0
)
1700 unsigned int den
, r
;
1703 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1706 raise_exception(EXCP00_DIVZ
);
1711 raise_exception(EXCP00_DIVZ
);
1716 void helper_idivl_EAX(target_ulong t0
)
1721 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1724 raise_exception(EXCP00_DIVZ
);
1728 if (q
!= (int32_t)q
)
1729 raise_exception(EXCP00_DIVZ
);
1736 /* XXX: exception */
1737 void helper_aam(int base
)
1743 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1747 void helper_aad(int base
)
1751 ah
= (EAX
>> 8) & 0xff;
1752 al
= ((ah
* base
) + al
) & 0xff;
1753 EAX
= (EAX
& ~0xffff) | al
;
1757 void helper_aaa(void)
1763 eflags
= cc_table
[CC_OP
].compute_all();
1766 ah
= (EAX
>> 8) & 0xff;
1768 icarry
= (al
> 0xf9);
1769 if (((al
& 0x0f) > 9 ) || af
) {
1770 al
= (al
+ 6) & 0x0f;
1771 ah
= (ah
+ 1 + icarry
) & 0xff;
1772 eflags
|= CC_C
| CC_A
;
1774 eflags
&= ~(CC_C
| CC_A
);
1777 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1782 void helper_aas(void)
1788 eflags
= cc_table
[CC_OP
].compute_all();
1791 ah
= (EAX
>> 8) & 0xff;
1794 if (((al
& 0x0f) > 9 ) || af
) {
1795 al
= (al
- 6) & 0x0f;
1796 ah
= (ah
- 1 - icarry
) & 0xff;
1797 eflags
|= CC_C
| CC_A
;
1799 eflags
&= ~(CC_C
| CC_A
);
1802 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1807 void helper_daa(void)
1812 eflags
= cc_table
[CC_OP
].compute_all();
1818 if (((al
& 0x0f) > 9 ) || af
) {
1819 al
= (al
+ 6) & 0xff;
1822 if ((al
> 0x9f) || cf
) {
1823 al
= (al
+ 0x60) & 0xff;
1826 EAX
= (EAX
& ~0xff) | al
;
1827 /* well, speed is not an issue here, so we compute the flags by hand */
1828 eflags
|= (al
== 0) << 6; /* zf */
1829 eflags
|= parity_table
[al
]; /* pf */
1830 eflags
|= (al
& 0x80); /* sf */
1835 void helper_das(void)
1837 int al
, al1
, af
, cf
;
1840 eflags
= cc_table
[CC_OP
].compute_all();
1847 if (((al
& 0x0f) > 9 ) || af
) {
1851 al
= (al
- 6) & 0xff;
1853 if ((al1
> 0x99) || cf
) {
1854 al
= (al
- 0x60) & 0xff;
1857 EAX
= (EAX
& ~0xff) | al
;
1858 /* well, speed is not an issue here, so we compute the flags by hand */
1859 eflags
|= (al
== 0) << 6; /* zf */
1860 eflags
|= parity_table
[al
]; /* pf */
1861 eflags
|= (al
& 0x80); /* sf */
1866 void helper_into(int next_eip_addend
)
1869 eflags
= cc_table
[CC_OP
].compute_all();
1870 if (eflags
& CC_O
) {
1871 raise_interrupt(EXCP04_INTO
, 1, 0, next_eip_addend
);
1875 void helper_cmpxchg8b(target_ulong a0
)
1880 eflags
= cc_table
[CC_OP
].compute_all();
1882 if (d
== (((uint64_t)EDX
<< 32) | (uint32_t)EAX
)) {
1883 stq(a0
, ((uint64_t)ECX
<< 32) | (uint32_t)EBX
);
1886 EDX
= (uint32_t)(d
>> 32);
1893 #ifdef TARGET_X86_64
1894 void helper_cmpxchg16b(target_ulong a0
)
1899 eflags
= cc_table
[CC_OP
].compute_all();
1902 if (d0
== EAX
&& d1
== EDX
) {
1915 void helper_single_step(void)
1917 env
->dr
[6] |= 0x4000;
1918 raise_exception(EXCP01_SSTP
);
1921 void helper_cpuid(void)
1924 index
= (uint32_t)EAX
;
1926 /* test if maximum index reached */
1927 if (index
& 0x80000000) {
1928 if (index
> env
->cpuid_xlevel
)
1929 index
= env
->cpuid_level
;
1931 if (index
> env
->cpuid_level
)
1932 index
= env
->cpuid_level
;
1937 EAX
= env
->cpuid_level
;
1938 EBX
= env
->cpuid_vendor1
;
1939 EDX
= env
->cpuid_vendor2
;
1940 ECX
= env
->cpuid_vendor3
;
1943 EAX
= env
->cpuid_version
;
1944 EBX
= (env
->cpuid_apic_id
<< 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1945 ECX
= env
->cpuid_ext_features
;
1946 EDX
= env
->cpuid_features
;
1949 /* cache info: needed for Pentium Pro compatibility */
1956 EAX
= env
->cpuid_xlevel
;
1957 EBX
= env
->cpuid_vendor1
;
1958 EDX
= env
->cpuid_vendor2
;
1959 ECX
= env
->cpuid_vendor3
;
1962 EAX
= env
->cpuid_features
;
1964 ECX
= env
->cpuid_ext3_features
;
1965 EDX
= env
->cpuid_ext2_features
;
1970 EAX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 0];
1971 EBX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 1];
1972 ECX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 2];
1973 EDX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 3];
1976 /* cache info (L1 cache) */
1983 /* cache info (L2 cache) */
1990 /* virtual & phys address size in low 2 bytes. */
1991 /* XXX: This value must match the one used in the MMU code. */
1992 #if defined(TARGET_X86_64)
1993 # if defined(USE_KQEMU)
1994 EAX
= 0x00003020; /* 48 bits virtual, 32 bits physical */
1996 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1997 EAX
= 0x00003028; /* 48 bits virtual, 40 bits physical */
2000 # if defined(USE_KQEMU)
2001 EAX
= 0x00000020; /* 32 bits physical */
2003 EAX
= 0x00000024; /* 36 bits physical */
2017 /* reserved values: zero */
2026 void helper_enter_level(int level
, int data32
, target_ulong t1
)
2029 uint32_t esp_mask
, esp
, ebp
;
2031 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2032 ssp
= env
->segs
[R_SS
].base
;
2041 stl(ssp
+ (esp
& esp_mask
), ldl(ssp
+ (ebp
& esp_mask
)));
2044 stl(ssp
+ (esp
& esp_mask
), t1
);
2051 stw(ssp
+ (esp
& esp_mask
), lduw(ssp
+ (ebp
& esp_mask
)));
2054 stw(ssp
+ (esp
& esp_mask
), t1
);
2058 #ifdef TARGET_X86_64
2059 void helper_enter64_level(int level
, int data64
, target_ulong t1
)
2061 target_ulong esp
, ebp
;
2081 stw(esp
, lduw(ebp
));
2089 void helper_lldt(int selector
)
2093 int index
, entry_limit
;
2097 if ((selector
& 0xfffc) == 0) {
2098 /* XXX: NULL selector case: invalid LDT */
2103 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2105 index
= selector
& ~7;
2106 #ifdef TARGET_X86_64
2107 if (env
->hflags
& HF_LMA_MASK
)
2112 if ((index
+ entry_limit
) > dt
->limit
)
2113 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2114 ptr
= dt
->base
+ index
;
2115 e1
= ldl_kernel(ptr
);
2116 e2
= ldl_kernel(ptr
+ 4);
2117 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
2118 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2119 if (!(e2
& DESC_P_MASK
))
2120 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2121 #ifdef TARGET_X86_64
2122 if (env
->hflags
& HF_LMA_MASK
) {
2124 e3
= ldl_kernel(ptr
+ 8);
2125 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2126 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
2130 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2133 env
->ldt
.selector
= selector
;
2136 void helper_ltr(int selector
)
2140 int index
, type
, entry_limit
;
2144 if ((selector
& 0xfffc) == 0) {
2145 /* NULL selector case: invalid TR */
2151 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2153 index
= selector
& ~7;
2154 #ifdef TARGET_X86_64
2155 if (env
->hflags
& HF_LMA_MASK
)
2160 if ((index
+ entry_limit
) > dt
->limit
)
2161 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2162 ptr
= dt
->base
+ index
;
2163 e1
= ldl_kernel(ptr
);
2164 e2
= ldl_kernel(ptr
+ 4);
2165 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2166 if ((e2
& DESC_S_MASK
) ||
2167 (type
!= 1 && type
!= 9))
2168 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2169 if (!(e2
& DESC_P_MASK
))
2170 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2171 #ifdef TARGET_X86_64
2172 if (env
->hflags
& HF_LMA_MASK
) {
2174 e3
= ldl_kernel(ptr
+ 8);
2175 e4
= ldl_kernel(ptr
+ 12);
2176 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf)
2177 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2178 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2179 env
->tr
.base
|= (target_ulong
)e3
<< 32;
2183 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2185 e2
|= DESC_TSS_BUSY_MASK
;
2186 stl_kernel(ptr
+ 4, e2
);
2188 env
->tr
.selector
= selector
;
2191 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2192 void helper_load_seg(int seg_reg
, int selector
)
2201 cpl
= env
->hflags
& HF_CPL_MASK
;
2202 if ((selector
& 0xfffc) == 0) {
2203 /* null selector case */
2205 #ifdef TARGET_X86_64
2206 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
2209 raise_exception_err(EXCP0D_GPF
, 0);
2210 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
2217 index
= selector
& ~7;
2218 if ((index
+ 7) > dt
->limit
)
2219 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2220 ptr
= dt
->base
+ index
;
2221 e1
= ldl_kernel(ptr
);
2222 e2
= ldl_kernel(ptr
+ 4);
2224 if (!(e2
& DESC_S_MASK
))
2225 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2227 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2228 if (seg_reg
== R_SS
) {
2229 /* must be writable segment */
2230 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
2231 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2232 if (rpl
!= cpl
|| dpl
!= cpl
)
2233 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2235 /* must be readable segment */
2236 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
)
2237 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2239 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2240 /* if not conforming code, test rights */
2241 if (dpl
< cpl
|| dpl
< rpl
)
2242 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2246 if (!(e2
& DESC_P_MASK
)) {
2247 if (seg_reg
== R_SS
)
2248 raise_exception_err(EXCP0C_STACK
, selector
& 0xfffc);
2250 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2253 /* set the access bit if not already set */
2254 if (!(e2
& DESC_A_MASK
)) {
2256 stl_kernel(ptr
+ 4, e2
);
2259 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
2260 get_seg_base(e1
, e2
),
2261 get_seg_limit(e1
, e2
),
2264 fprintf(logfile
, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2265 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
2270 /* protected mode jump */
2271 void helper_ljmp_protected(int new_cs
, target_ulong new_eip
,
2272 int next_eip_addend
)
2275 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
2276 target_ulong next_eip
;
2278 if ((new_cs
& 0xfffc) == 0)
2279 raise_exception_err(EXCP0D_GPF
, 0);
2280 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2281 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2282 cpl
= env
->hflags
& HF_CPL_MASK
;
2283 if (e2
& DESC_S_MASK
) {
2284 if (!(e2
& DESC_CS_MASK
))
2285 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2286 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2287 if (e2
& DESC_C_MASK
) {
2288 /* conforming code segment */
2290 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2292 /* non conforming code segment */
2295 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2297 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2299 if (!(e2
& DESC_P_MASK
))
2300 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2301 limit
= get_seg_limit(e1
, e2
);
2302 if (new_eip
> limit
&&
2303 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
))
2304 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2305 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2306 get_seg_base(e1
, e2
), limit
, e2
);
2309 /* jump to call or task gate */
2310 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2312 cpl
= env
->hflags
& HF_CPL_MASK
;
2313 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2315 case 1: /* 286 TSS */
2316 case 9: /* 386 TSS */
2317 case 5: /* task gate */
2318 if (dpl
< cpl
|| dpl
< rpl
)
2319 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2320 next_eip
= env
->eip
+ next_eip_addend
;
2321 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
);
2322 CC_OP
= CC_OP_EFLAGS
;
2324 case 4: /* 286 call gate */
2325 case 12: /* 386 call gate */
2326 if ((dpl
< cpl
) || (dpl
< rpl
))
2327 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2328 if (!(e2
& DESC_P_MASK
))
2329 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2331 new_eip
= (e1
& 0xffff);
2333 new_eip
|= (e2
& 0xffff0000);
2334 if (load_segment(&e1
, &e2
, gate_cs
) != 0)
2335 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2336 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2337 /* must be code segment */
2338 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
2339 (DESC_S_MASK
| DESC_CS_MASK
)))
2340 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2341 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
2342 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
)))
2343 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2344 if (!(e2
& DESC_P_MASK
))
2345 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2346 limit
= get_seg_limit(e1
, e2
);
2347 if (new_eip
> limit
)
2348 raise_exception_err(EXCP0D_GPF
, 0);
2349 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
2350 get_seg_base(e1
, e2
), limit
, e2
);
2354 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2360 /* real mode call */
2361 void helper_lcall_real(int new_cs
, target_ulong new_eip1
,
2362 int shift
, int next_eip
)
2365 uint32_t esp
, esp_mask
;
2370 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2371 ssp
= env
->segs
[R_SS
].base
;
2373 PUSHL(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2374 PUSHL(ssp
, esp
, esp_mask
, next_eip
);
2376 PUSHW(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2377 PUSHW(ssp
, esp
, esp_mask
, next_eip
);
2380 SET_ESP(esp
, esp_mask
);
2382 env
->segs
[R_CS
].selector
= new_cs
;
2383 env
->segs
[R_CS
].base
= (new_cs
<< 4);
2386 /* protected mode call */
2387 void helper_lcall_protected(int new_cs
, target_ulong new_eip
,
2388 int shift
, int next_eip_addend
)
2391 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
2392 uint32_t ss
, ss_e1
, ss_e2
, sp
, type
, ss_dpl
, sp_mask
;
2393 uint32_t val
, limit
, old_sp_mask
;
2394 target_ulong ssp
, old_ssp
, next_eip
;
2396 next_eip
= env
->eip
+ next_eip_addend
;
2398 if (loglevel
& CPU_LOG_PCALL
) {
2399 fprintf(logfile
, "lcall %04x:%08x s=%d\n",
2400 new_cs
, (uint32_t)new_eip
, shift
);
2401 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
2404 if ((new_cs
& 0xfffc) == 0)
2405 raise_exception_err(EXCP0D_GPF
, 0);
2406 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2407 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2408 cpl
= env
->hflags
& HF_CPL_MASK
;
2410 if (loglevel
& CPU_LOG_PCALL
) {
2411 fprintf(logfile
, "desc=%08x:%08x\n", e1
, e2
);
2414 if (e2
& DESC_S_MASK
) {
2415 if (!(e2
& DESC_CS_MASK
))
2416 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2417 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2418 if (e2
& DESC_C_MASK
) {
2419 /* conforming code segment */
2421 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2423 /* non conforming code segment */
2426 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2428 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2430 if (!(e2
& DESC_P_MASK
))
2431 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2433 #ifdef TARGET_X86_64
2434 /* XXX: check 16/32 bit cases in long mode */
2439 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
2440 PUSHQ(rsp
, next_eip
);
2441 /* from this point, not restartable */
2443 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2444 get_seg_base(e1
, e2
),
2445 get_seg_limit(e1
, e2
), e2
);
2451 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2452 ssp
= env
->segs
[R_SS
].base
;
2454 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2455 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2457 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2458 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2461 limit
= get_seg_limit(e1
, e2
);
2462 if (new_eip
> limit
)
2463 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2464 /* from this point, not restartable */
2465 SET_ESP(sp
, sp_mask
);
2466 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2467 get_seg_base(e1
, e2
), limit
, e2
);
2471 /* check gate type */
2472 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
2473 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2476 case 1: /* available 286 TSS */
2477 case 9: /* available 386 TSS */
2478 case 5: /* task gate */
2479 if (dpl
< cpl
|| dpl
< rpl
)
2480 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2481 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
2482 CC_OP
= CC_OP_EFLAGS
;
2484 case 4: /* 286 call gate */
2485 case 12: /* 386 call gate */
2488 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2493 if (dpl
< cpl
|| dpl
< rpl
)
2494 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2495 /* check valid bit */
2496 if (!(e2
& DESC_P_MASK
))
2497 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2498 selector
= e1
>> 16;
2499 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
2500 param_count
= e2
& 0x1f;
2501 if ((selector
& 0xfffc) == 0)
2502 raise_exception_err(EXCP0D_GPF
, 0);
2504 if (load_segment(&e1
, &e2
, selector
) != 0)
2505 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2506 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
2507 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2508 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2510 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2511 if (!(e2
& DESC_P_MASK
))
2512 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2514 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
2515 /* to inner privilege */
2516 get_ss_esp_from_tss(&ss
, &sp
, dpl
);
2518 if (loglevel
& CPU_LOG_PCALL
)
2519 fprintf(logfile
, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
"\n",
2520 ss
, sp
, param_count
, ESP
);
2522 if ((ss
& 0xfffc) == 0)
2523 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2524 if ((ss
& 3) != dpl
)
2525 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2526 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
2527 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2528 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2530 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2531 if (!(ss_e2
& DESC_S_MASK
) ||
2532 (ss_e2
& DESC_CS_MASK
) ||
2533 !(ss_e2
& DESC_W_MASK
))
2534 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2535 if (!(ss_e2
& DESC_P_MASK
))
2536 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2538 // push_size = ((param_count * 2) + 8) << shift;
2540 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2541 old_ssp
= env
->segs
[R_SS
].base
;
2543 sp_mask
= get_sp_mask(ss_e2
);
2544 ssp
= get_seg_base(ss_e1
, ss_e2
);
2546 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2547 PUSHL(ssp
, sp
, sp_mask
, ESP
);
2548 for(i
= param_count
- 1; i
>= 0; i
--) {
2549 val
= ldl_kernel(old_ssp
+ ((ESP
+ i
* 4) & old_sp_mask
));
2550 PUSHL(ssp
, sp
, sp_mask
, val
);
2553 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2554 PUSHW(ssp
, sp
, sp_mask
, ESP
);
2555 for(i
= param_count
- 1; i
>= 0; i
--) {
2556 val
= lduw_kernel(old_ssp
+ ((ESP
+ i
* 2) & old_sp_mask
));
2557 PUSHW(ssp
, sp
, sp_mask
, val
);
2562 /* to same privilege */
2564 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2565 ssp
= env
->segs
[R_SS
].base
;
2566 // push_size = (4 << shift);
2571 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2572 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2574 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2575 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2578 /* from this point, not restartable */
2581 ss
= (ss
& ~3) | dpl
;
2582 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
2584 get_seg_limit(ss_e1
, ss_e2
),
2588 selector
= (selector
& ~3) | dpl
;
2589 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
2590 get_seg_base(e1
, e2
),
2591 get_seg_limit(e1
, e2
),
2593 cpu_x86_set_cpl(env
, dpl
);
2594 SET_ESP(sp
, sp_mask
);
2598 if (kqemu_is_ok(env
)) {
2599 env
->exception_index
= -1;
2605 /* real and vm86 mode iret */
2606 void helper_iret_real(int shift
)
2608 uint32_t sp
, new_cs
, new_eip
, new_eflags
, sp_mask
;
2612 sp_mask
= 0xffff; /* XXXX: use SS segment size ? */
2614 ssp
= env
->segs
[R_SS
].base
;
2617 POPL(ssp
, sp
, sp_mask
, new_eip
);
2618 POPL(ssp
, sp
, sp_mask
, new_cs
);
2620 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2623 POPW(ssp
, sp
, sp_mask
, new_eip
);
2624 POPW(ssp
, sp
, sp_mask
, new_cs
);
2625 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2627 ESP
= (ESP
& ~sp_mask
) | (sp
& sp_mask
);
2628 load_seg_vm(R_CS
, new_cs
);
2630 if (env
->eflags
& VM_MASK
)
2631 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| RF_MASK
| NT_MASK
;
2633 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| IOPL_MASK
| RF_MASK
| NT_MASK
;
2635 eflags_mask
&= 0xffff;
2636 load_eflags(new_eflags
, eflags_mask
);
2637 env
->hflags
&= ~HF_NMI_MASK
;
2640 static inline void validate_seg(int seg_reg
, int cpl
)
2645 /* XXX: on x86_64, we do not want to nullify FS and GS because
2646 they may still contain a valid base. I would be interested to
2647 know how a real x86_64 CPU behaves */
2648 if ((seg_reg
== R_FS
|| seg_reg
== R_GS
) &&
2649 (env
->segs
[seg_reg
].selector
& 0xfffc) == 0)
2652 e2
= env
->segs
[seg_reg
].flags
;
2653 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2654 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2655 /* data or non conforming code segment */
2657 cpu_x86_load_seg_cache(env
, seg_reg
, 0, 0, 0, 0);
2662 /* protected mode iret */
2663 static inline void helper_ret_protected(int shift
, int is_iret
, int addend
)
2665 uint32_t new_cs
, new_eflags
, new_ss
;
2666 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2667 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2668 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2669 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2671 #ifdef TARGET_X86_64
2676 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2678 ssp
= env
->segs
[R_SS
].base
;
2679 new_eflags
= 0; /* avoid warning */
2680 #ifdef TARGET_X86_64
2686 POPQ(sp
, new_eflags
);
2692 POPL(ssp
, sp
, sp_mask
, new_eip
);
2693 POPL(ssp
, sp
, sp_mask
, new_cs
);
2696 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2697 if (new_eflags
& VM_MASK
)
2698 goto return_to_vm86
;
2702 POPW(ssp
, sp
, sp_mask
, new_eip
);
2703 POPW(ssp
, sp
, sp_mask
, new_cs
);
2705 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2708 if (loglevel
& CPU_LOG_PCALL
) {
2709 fprintf(logfile
, "lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2710 new_cs
, new_eip
, shift
, addend
);
2711 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
2714 if ((new_cs
& 0xfffc) == 0)
2715 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2716 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2717 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2718 if (!(e2
& DESC_S_MASK
) ||
2719 !(e2
& DESC_CS_MASK
))
2720 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2721 cpl
= env
->hflags
& HF_CPL_MASK
;
2724 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2725 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2726 if (e2
& DESC_C_MASK
) {
2728 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2731 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2733 if (!(e2
& DESC_P_MASK
))
2734 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2737 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2738 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2739 /* return to same privilege level */
2740 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2741 get_seg_base(e1
, e2
),
2742 get_seg_limit(e1
, e2
),
2745 /* return to different privilege level */
2746 #ifdef TARGET_X86_64
2755 POPL(ssp
, sp
, sp_mask
, new_esp
);
2756 POPL(ssp
, sp
, sp_mask
, new_ss
);
2760 POPW(ssp
, sp
, sp_mask
, new_esp
);
2761 POPW(ssp
, sp
, sp_mask
, new_ss
);
2764 if (loglevel
& CPU_LOG_PCALL
) {
2765 fprintf(logfile
, "new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2769 if ((new_ss
& 0xfffc) == 0) {
2770 #ifdef TARGET_X86_64
2771 /* NULL ss is allowed in long mode if cpl != 3*/
2772 /* XXX: test CS64 ? */
2773 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2774 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2776 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2777 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2778 DESC_W_MASK
| DESC_A_MASK
);
2779 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed ? */
2783 raise_exception_err(EXCP0D_GPF
, 0);
2786 if ((new_ss
& 3) != rpl
)
2787 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2788 if (load_segment(&ss_e1
, &ss_e2
, new_ss
) != 0)
2789 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2790 if (!(ss_e2
& DESC_S_MASK
) ||
2791 (ss_e2
& DESC_CS_MASK
) ||
2792 !(ss_e2
& DESC_W_MASK
))
2793 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2794 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2796 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2797 if (!(ss_e2
& DESC_P_MASK
))
2798 raise_exception_err(EXCP0B_NOSEG
, new_ss
& 0xfffc);
2799 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2800 get_seg_base(ss_e1
, ss_e2
),
2801 get_seg_limit(ss_e1
, ss_e2
),
2805 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2806 get_seg_base(e1
, e2
),
2807 get_seg_limit(e1
, e2
),
2809 cpu_x86_set_cpl(env
, rpl
);
2811 #ifdef TARGET_X86_64
2812 if (env
->hflags
& HF_CS64_MASK
)
2816 sp_mask
= get_sp_mask(ss_e2
);
2818 /* validate data segments */
2819 validate_seg(R_ES
, rpl
);
2820 validate_seg(R_DS
, rpl
);
2821 validate_seg(R_FS
, rpl
);
2822 validate_seg(R_GS
, rpl
);
2826 SET_ESP(sp
, sp_mask
);
2829 /* NOTE: 'cpl' is the _old_ CPL */
2830 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2832 eflags_mask
|= IOPL_MASK
;
2833 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2835 eflags_mask
|= IF_MASK
;
2837 eflags_mask
&= 0xffff;
2838 load_eflags(new_eflags
, eflags_mask
);
2843 POPL(ssp
, sp
, sp_mask
, new_esp
);
2844 POPL(ssp
, sp
, sp_mask
, new_ss
);
2845 POPL(ssp
, sp
, sp_mask
, new_es
);
2846 POPL(ssp
, sp
, sp_mask
, new_ds
);
2847 POPL(ssp
, sp
, sp_mask
, new_fs
);
2848 POPL(ssp
, sp
, sp_mask
, new_gs
);
2850 /* modify processor state */
2851 load_eflags(new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2852 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
| VIP_MASK
);
2853 load_seg_vm(R_CS
, new_cs
& 0xffff);
2854 cpu_x86_set_cpl(env
, 3);
2855 load_seg_vm(R_SS
, new_ss
& 0xffff);
2856 load_seg_vm(R_ES
, new_es
& 0xffff);
2857 load_seg_vm(R_DS
, new_ds
& 0xffff);
2858 load_seg_vm(R_FS
, new_fs
& 0xffff);
2859 load_seg_vm(R_GS
, new_gs
& 0xffff);
2861 env
->eip
= new_eip
& 0xffff;
2865 void helper_iret_protected(int shift
, int next_eip
)
2867 int tss_selector
, type
;
2870 /* specific case for TSS */
2871 if (env
->eflags
& NT_MASK
) {
2872 #ifdef TARGET_X86_64
2873 if (env
->hflags
& HF_LMA_MASK
)
2874 raise_exception_err(EXCP0D_GPF
, 0);
2876 tss_selector
= lduw_kernel(env
->tr
.base
+ 0);
2877 if (tss_selector
& 4)
2878 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2879 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
2880 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2881 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2882 /* NOTE: we check both segment and busy TSS */
2884 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2885 switch_tss(tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
);
2887 helper_ret_protected(shift
, 1, 0);
2889 env
->hflags
&= ~HF_NMI_MASK
;
2891 if (kqemu_is_ok(env
)) {
2892 CC_OP
= CC_OP_EFLAGS
;
2893 env
->exception_index
= -1;
2899 void helper_lret_protected(int shift
, int addend
)
2901 helper_ret_protected(shift
, 0, addend
);
2903 if (kqemu_is_ok(env
)) {
2904 env
->exception_index
= -1;
2910 void helper_sysenter(void)
2912 if (env
->sysenter_cs
== 0) {
2913 raise_exception_err(EXCP0D_GPF
, 0);
2915 env
->eflags
&= ~(VM_MASK
| IF_MASK
| RF_MASK
);
2916 cpu_x86_set_cpl(env
, 0);
2917 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2919 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2921 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2922 cpu_x86_load_seg_cache(env
, R_SS
, (env
->sysenter_cs
+ 8) & 0xfffc,
2924 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2926 DESC_W_MASK
| DESC_A_MASK
);
2927 ESP
= env
->sysenter_esp
;
2928 EIP
= env
->sysenter_eip
;
2931 void helper_sysexit(void)
2935 cpl
= env
->hflags
& HF_CPL_MASK
;
2936 if (env
->sysenter_cs
== 0 || cpl
!= 0) {
2937 raise_exception_err(EXCP0D_GPF
, 0);
2939 cpu_x86_set_cpl(env
, 3);
2940 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 16) & 0xfffc) | 3,
2942 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2943 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2944 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2945 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 24) & 0xfffc) | 3,
2947 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2948 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2949 DESC_W_MASK
| DESC_A_MASK
);
2953 if (kqemu_is_ok(env
)) {
2954 env
->exception_index
= -1;
2960 void helper_movl_crN_T0(int reg
, target_ulong t0
)
2962 #if !defined(CONFIG_USER_ONLY)
2965 cpu_x86_update_cr0(env
, t0
);
2968 cpu_x86_update_cr3(env
, t0
);
2971 cpu_x86_update_cr4(env
, t0
);
2974 cpu_set_apic_tpr(env
, t0
);
/*
 * LMSW instruction: load the low bits of the machine status word.
 *
 * Only the 4 low bits of CR0 are modified.  The mask ~0xe keeps CR0
 * bit 0 (PE) from the old value, so PE cannot be cleared once it has
 * been set to one; the new value can only OR it in.
 */
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_movl_crN_T0(0, t0);
}
/*
 * CLTS instruction: clear the task-switched bit in CR0 and the
 * corresponding cached TS flag in env->hflags.
 */
void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}
2998 #if !defined(CONFIG_USER_ONLY)
/* Read CR8: return the current task-priority value from the APIC. */
target_ulong helper_movtl_T0_cr8(void)
{
    return cpu_get_apic_tpr(env);
}
3006 void helper_movl_drN_T0(int reg
, target_ulong t0
)
/* INVLPG instruction: flush the TLB entry covering linear address 'addr'. */
void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}
3016 void helper_rdtsc(void)
3020 if ((env
->cr
[4] & CR4_TSD_MASK
) && ((env
->hflags
& HF_CPL_MASK
) != 0)) {
3021 raise_exception(EXCP0D_GPF
);
3023 val
= cpu_get_tsc(env
);
3024 EAX
= (uint32_t)(val
);
3025 EDX
= (uint32_t)(val
>> 32);
/*
 * RDPMC instruction.  Performance counters are not implemented, so
 * after the privilege check and the SVM intercept check this always
 * raises #UD.
 *
 * NOTE(review): the #GP condition tests CR4.PCE *set* at CPL != 0,
 * mirroring the RDTSC/TSD check above; Intel documents #GP when
 * CR4.PCE is *clear* at CPL > 0 — verify against the SDM.
 */
void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
3040 #if defined(CONFIG_USER_ONLY)
3041 void helper_wrmsr(void)
3045 void helper_rdmsr(void)
3049 void helper_wrmsr(void)
3053 val
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
3055 switch((uint32_t)ECX
) {
3056 case MSR_IA32_SYSENTER_CS
:
3057 env
->sysenter_cs
= val
& 0xffff;
3059 case MSR_IA32_SYSENTER_ESP
:
3060 env
->sysenter_esp
= val
;
3062 case MSR_IA32_SYSENTER_EIP
:
3063 env
->sysenter_eip
= val
;
3065 case MSR_IA32_APICBASE
:
3066 cpu_set_apic_base(env
, val
);
3070 uint64_t update_mask
;
3072 if (env
->cpuid_ext2_features
& CPUID_EXT2_SYSCALL
)
3073 update_mask
|= MSR_EFER_SCE
;
3074 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
)
3075 update_mask
|= MSR_EFER_LME
;
3076 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3077 update_mask
|= MSR_EFER_FFXSR
;
3078 if (env
->cpuid_ext2_features
& CPUID_EXT2_NX
)
3079 update_mask
|= MSR_EFER_NXE
;
3080 env
->efer
= (env
->efer
& ~update_mask
) |
3081 (val
& update_mask
);
3090 case MSR_VM_HSAVE_PA
:
3091 env
->vm_hsave
= val
;
3093 #ifdef TARGET_X86_64
3104 env
->segs
[R_FS
].base
= val
;
3107 env
->segs
[R_GS
].base
= val
;
3109 case MSR_KERNELGSBASE
:
3110 env
->kernelgsbase
= val
;
3114 /* XXX: exception ? */
3119 void helper_rdmsr(void)
3122 switch((uint32_t)ECX
) {
3123 case MSR_IA32_SYSENTER_CS
:
3124 val
= env
->sysenter_cs
;
3126 case MSR_IA32_SYSENTER_ESP
:
3127 val
= env
->sysenter_esp
;
3129 case MSR_IA32_SYSENTER_EIP
:
3130 val
= env
->sysenter_eip
;
3132 case MSR_IA32_APICBASE
:
3133 val
= cpu_get_apic_base(env
);
3144 case MSR_VM_HSAVE_PA
:
3145 val
= env
->vm_hsave
;
3147 #ifdef TARGET_X86_64
3158 val
= env
->segs
[R_FS
].base
;
3161 val
= env
->segs
[R_GS
].base
;
3163 case MSR_KERNELGSBASE
:
3164 val
= env
->kernelgsbase
;
3168 /* XXX: exception ? */
3172 EAX
= (uint32_t)(val
);
3173 EDX
= (uint32_t)(val
>> 32);
3177 target_ulong
helper_lsl(target_ulong selector1
)
3180 uint32_t e1
, e2
, eflags
, selector
;
3181 int rpl
, dpl
, cpl
, type
;
3183 selector
= selector1
& 0xffff;
3184 eflags
= cc_table
[CC_OP
].compute_all();
3185 if (load_segment(&e1
, &e2
, selector
) != 0)
3188 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3189 cpl
= env
->hflags
& HF_CPL_MASK
;
3190 if (e2
& DESC_S_MASK
) {
3191 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3194 if (dpl
< cpl
|| dpl
< rpl
)
3198 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3209 if (dpl
< cpl
|| dpl
< rpl
) {
3211 CC_SRC
= eflags
& ~CC_Z
;
3215 limit
= get_seg_limit(e1
, e2
);
3216 CC_SRC
= eflags
| CC_Z
;
3220 target_ulong
helper_lar(target_ulong selector1
)
3222 uint32_t e1
, e2
, eflags
, selector
;
3223 int rpl
, dpl
, cpl
, type
;
3225 selector
= selector1
& 0xffff;
3226 eflags
= cc_table
[CC_OP
].compute_all();
3227 if ((selector
& 0xfffc) == 0)
3229 if (load_segment(&e1
, &e2
, selector
) != 0)
3232 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3233 cpl
= env
->hflags
& HF_CPL_MASK
;
3234 if (e2
& DESC_S_MASK
) {
3235 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3238 if (dpl
< cpl
|| dpl
< rpl
)
3242 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3256 if (dpl
< cpl
|| dpl
< rpl
) {
3258 CC_SRC
= eflags
& ~CC_Z
;
3262 CC_SRC
= eflags
| CC_Z
;
3263 return e2
& 0x00f0ff00;
3266 void helper_verr(target_ulong selector1
)
3268 uint32_t e1
, e2
, eflags
, selector
;
3271 selector
= selector1
& 0xffff;
3272 eflags
= cc_table
[CC_OP
].compute_all();
3273 if ((selector
& 0xfffc) == 0)
3275 if (load_segment(&e1
, &e2
, selector
) != 0)
3277 if (!(e2
& DESC_S_MASK
))
3280 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3281 cpl
= env
->hflags
& HF_CPL_MASK
;
3282 if (e2
& DESC_CS_MASK
) {
3283 if (!(e2
& DESC_R_MASK
))
3285 if (!(e2
& DESC_C_MASK
)) {
3286 if (dpl
< cpl
|| dpl
< rpl
)
3290 if (dpl
< cpl
|| dpl
< rpl
) {
3292 CC_SRC
= eflags
& ~CC_Z
;
3296 CC_SRC
= eflags
| CC_Z
;
3299 void helper_verw(target_ulong selector1
)
3301 uint32_t e1
, e2
, eflags
, selector
;
3304 selector
= selector1
& 0xffff;
3305 eflags
= cc_table
[CC_OP
].compute_all();
3306 if ((selector
& 0xfffc) == 0)
3308 if (load_segment(&e1
, &e2
, selector
) != 0)
3310 if (!(e2
& DESC_S_MASK
))
3313 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3314 cpl
= env
->hflags
& HF_CPL_MASK
;
3315 if (e2
& DESC_CS_MASK
) {
3318 if (dpl
< cpl
|| dpl
< rpl
)
3320 if (!(e2
& DESC_W_MASK
)) {
3322 CC_SRC
= eflags
& ~CC_Z
;
3326 CC_SRC
= eflags
| CC_Z
;
3329 /* x87 FPU helpers */
3331 static void fpu_set_exception(int mask
)
3334 if (env
->fpus
& (~env
->fpuc
& FPUC_EM
))
3335 env
->fpus
|= FPUS_SE
| FPUS_B
;
3338 static inline CPU86_LDouble
helper_fdiv(CPU86_LDouble a
, CPU86_LDouble b
)
3341 fpu_set_exception(FPUS_ZE
);
3345 void fpu_raise_exception(void)
3347 if (env
->cr
[0] & CR0_NE_MASK
) {
3348 raise_exception(EXCP10_COPR
);
3350 #if !defined(CONFIG_USER_ONLY)
3357 void helper_flds_FT0(uint32_t val
)
3364 FT0
= float32_to_floatx(u
.f
, &env
->fp_status
);
3367 void helper_fldl_FT0(uint64_t val
)
3374 FT0
= float64_to_floatx(u
.f
, &env
->fp_status
);
/* FILD (32-bit source): convert a signed 32-bit integer into the FT0
   scratch FP register, using the current FP status for rounding. */
void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}
3382 void helper_flds_ST0(uint32_t val
)
3389 new_fpstt
= (env
->fpstt
- 1) & 7;
3391 env
->fpregs
[new_fpstt
].d
= float32_to_floatx(u
.f
, &env
->fp_status
);
3392 env
->fpstt
= new_fpstt
;
3393 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3396 void helper_fldl_ST0(uint64_t val
)
3403 new_fpstt
= (env
->fpstt
- 1) & 7;
3405 env
->fpregs
[new_fpstt
].d
= float64_to_floatx(u
.f
, &env
->fp_status
);
3406 env
->fpstt
= new_fpstt
;
3407 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3410 void helper_fildl_ST0(int32_t val
)
3413 new_fpstt
= (env
->fpstt
- 1) & 7;
3414 env
->fpregs
[new_fpstt
].d
= int32_to_floatx(val
, &env
->fp_status
);
3415 env
->fpstt
= new_fpstt
;
3416 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3419 void helper_fildll_ST0(int64_t val
)
3422 new_fpstt
= (env
->fpstt
- 1) & 7;
3423 env
->fpregs
[new_fpstt
].d
= int64_to_floatx(val
, &env
->fp_status
);
3424 env
->fpstt
= new_fpstt
;
3425 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3428 uint32_t helper_fsts_ST0(void)
3434 u
.f
= floatx_to_float32(ST0
, &env
->fp_status
);
3438 uint64_t helper_fstl_ST0(void)
3444 u
.f
= floatx_to_float64(ST0
, &env
->fp_status
);
3448 int32_t helper_fist_ST0(void)
3451 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3452 if (val
!= (int16_t)val
)
3457 int32_t helper_fistl_ST0(void)
3460 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3464 int64_t helper_fistll_ST0(void)
3467 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3471 int32_t helper_fistt_ST0(void)
3474 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3475 if (val
!= (int16_t)val
)
3480 int32_t helper_fisttl_ST0(void)
3483 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3487 int64_t helper_fisttll_ST0(void)
3490 val
= floatx_to_int64_round_to_zero(ST0
, &env
->fp_status
);
3494 void helper_fldt_ST0(target_ulong ptr
)
3497 new_fpstt
= (env
->fpstt
- 1) & 7;
3498 env
->fpregs
[new_fpstt
].d
= helper_fldt(ptr
);
3499 env
->fpstt
= new_fpstt
;
3500 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
/* Store ST0 to guest memory at 'ptr' via helper_fstt (the
   extended-precision store used by FSTP m80, per its FLDT/FSTT pairing). */
void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}
3508 void helper_fpush(void)
3513 void helper_fpop(void)
/* FDECSTP: decrement the FPU top-of-stack pointer modulo 8 and clear
   the C0..C3 condition-code bits (mask 0x4700) in the status word. */
void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}
/* FINCSTP: increment the FPU top-of-stack pointer modulo 8 and clear
   the C0..C3 condition-code bits (mask 0x4700) in the status word. */
void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}
/* FFREE ST(i): mark the physical register at stack position st_index
   (relative to the current top) as empty in the tag array.  In this
   simplified tag encoding 1 == empty, 0 == valid. */
void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}
3537 void helper_fmov_ST0_FT0(void)
3542 void helper_fmov_FT0_STN(int st_index
)
3547 void helper_fmov_ST0_STN(int st_index
)
3552 void helper_fmov_STN_ST0(int st_index
)
3557 void helper_fxchg_ST0_STN(int st_index
)
3565 /* FPU operations */
/* FPU status-word condition bits (C3/C2/C0) indexed by the
   floatx_compare() result + 1: less, equal, greater, unordered. */
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3569 void helper_fcom_ST0_FT0(void)
3573 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3574 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3578 void helper_fucom_ST0_FT0(void)
3582 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3583 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3587 static const int fcomi_ccval
[4] = {CC_C
, CC_Z
, 0, CC_Z
| CC_P
| CC_C
};
3589 void helper_fcomi_ST0_FT0(void)
3594 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3595 eflags
= cc_table
[CC_OP
].compute_all();
3596 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3601 void helper_fucomi_ST0_FT0(void)
3606 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3607 eflags
= cc_table
[CC_OP
].compute_all();
3608 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3613 void helper_fadd_ST0_FT0(void)
3618 void helper_fmul_ST0_FT0(void)
3623 void helper_fsub_ST0_FT0(void)
3628 void helper_fsubr_ST0_FT0(void)
/* FDIV: ST0 = ST0 / FT0, routed through helper_fdiv so a zero divisor
   sets the zero-divide exception flag. */
void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}
/* FDIVR: ST0 = FT0 / ST0 (reversed operands), routed through
   helper_fdiv for zero-divide handling. */
void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}
3643 /* fp operations between STN and ST0 */
/* FADD ST(i), ST0: add ST0 into stack register st_index. */
void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}
/* FMUL ST(i), ST0: multiply stack register st_index by ST0. */
void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}
/* FSUB ST(i), ST0: subtract ST0 from stack register st_index. */
void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}
3660 void helper_fsubr_STN_ST0(int st_index
)
3667 void helper_fdiv_STN_ST0(int st_index
)
3671 *p
= helper_fdiv(*p
, ST0
);
3674 void helper_fdivr_STN_ST0(int st_index
)
3678 *p
= helper_fdiv(ST0
, *p
);
3681 /* misc FPU operations */
/* FCHS: negate the sign of ST0. */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}
/* FABS: take the absolute value of ST0. */
void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}
3692 void helper_fld1_ST0(void)
3697 void helper_fldl2t_ST0(void)
3702 void helper_fldl2e_ST0(void)
3707 void helper_fldpi_ST0(void)
3712 void helper_fldlg2_ST0(void)
3717 void helper_fldln2_ST0(void)
3722 void helper_fldz_ST0(void)
3727 void helper_fldz_FT0(void)
/* FNSTSW: return the FPU status word with the TOP field (bits 11-13)
   rebuilt from the separately-tracked env->fpstt. */
uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}
3737 uint32_t helper_fnstcw(void)
3742 static void update_fp_status(void)
3746 /* set rounding mode */
3747 switch(env
->fpuc
& RC_MASK
) {
3750 rnd_type
= float_round_nearest_even
;
3753 rnd_type
= float_round_down
;
3756 rnd_type
= float_round_up
;
3759 rnd_type
= float_round_to_zero
;
3762 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
3764 switch((env
->fpuc
>> 8) & 3) {
3776 set_floatx80_rounding_precision(rnd_type
, &env
->fp_status
);
3780 void helper_fldcw(uint32_t val
)
/* FNCLEX: clear the exception flags (bits 0-7) and the busy bit
   (bit 15) of the status word; bits 8-14 (C0..C3 and TOP) survive. */
void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}
/* FWAIT: if the summary-exception bit is set in the status word,
   deliver the pending FPU exception. */
void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}
3798 void helper_fninit(void)
3815 void helper_fbld_ST0(target_ulong ptr
)
3823 for(i
= 8; i
>= 0; i
--) {
3825 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
3828 if (ldub(ptr
+ 9) & 0x80)
3834 void helper_fbst_ST0(target_ulong ptr
)
3837 target_ulong mem_ref
, mem_end
;
3840 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3842 mem_end
= mem_ref
+ 9;
3849 while (mem_ref
< mem_end
) {
3854 v
= ((v
/ 10) << 4) | (v
% 10);
3857 while (mem_ref
< mem_end
) {
/* F2XM1: ST0 = 2^ST0 - 1, computed with the host pow(). */
void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}
3867 void helper_fyl2x(void)
3869 CPU86_LDouble fptemp
;
3873 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
3877 env
->fpus
&= (~0x4700);
3882 void helper_fptan(void)
3884 CPU86_LDouble fptemp
;
3887 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3893 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3894 /* the above code is for |arg| < 2**52 only */
3898 void helper_fpatan(void)
3900 CPU86_LDouble fptemp
, fpsrcop
;
3904 ST1
= atan2(fpsrcop
,fptemp
);
3908 void helper_fxtract(void)
3910 CPU86_LDoubleU temp
;
3911 unsigned int expdif
;
3914 expdif
= EXPD(temp
) - EXPBIAS
;
3915 /*DP exponent bias*/
3922 void helper_fprem1(void)
3924 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
3925 CPU86_LDoubleU fpsrcop1
, fptemp1
;
3927 signed long long int q
;
3929 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
3930 ST0
= 0.0 / 0.0; /* NaN */
3931 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3937 fpsrcop1
.d
= fpsrcop
;
3939 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
3942 /* optimisation? taken from the AMD docs */
3943 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3944 /* ST0 is unchanged */
3949 dblq
= fpsrcop
/ fptemp
;
3950 /* round dblq towards nearest integer */
3952 ST0
= fpsrcop
- fptemp
* dblq
;
3954 /* convert dblq to q by truncating towards zero */
3956 q
= (signed long long int)(-dblq
);
3958 q
= (signed long long int)dblq
;
3960 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3961 /* (C0,C3,C1) <-- (q2,q1,q0) */
3962 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
3963 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
3964 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
3966 env
->fpus
|= 0x400; /* C2 <-- 1 */
3967 fptemp
= pow(2.0, expdif
- 50);
3968 fpsrcop
= (ST0
/ ST1
) / fptemp
;
3969 /* fpsrcop = integer obtained by chopping */
3970 fpsrcop
= (fpsrcop
< 0.0) ?
3971 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
3972 ST0
-= (ST1
* fpsrcop
* fptemp
);
3976 void helper_fprem(void)
3978 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
3979 CPU86_LDoubleU fpsrcop1
, fptemp1
;
3981 signed long long int q
;
3983 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
3984 ST0
= 0.0 / 0.0; /* NaN */
3985 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3989 fpsrcop
= (CPU86_LDouble
)ST0
;
3990 fptemp
= (CPU86_LDouble
)ST1
;
3991 fpsrcop1
.d
= fpsrcop
;
3993 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
3996 /* optimisation? taken from the AMD docs */
3997 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3998 /* ST0 is unchanged */
4002 if ( expdif
< 53 ) {
4003 dblq
= fpsrcop
/*ST0*/ / fptemp
/*ST1*/;
4004 /* round dblq towards zero */
4005 dblq
= (dblq
< 0.0) ? ceil(dblq
) : floor(dblq
);
4006 ST0
= fpsrcop
/*ST0*/ - fptemp
* dblq
;
4008 /* convert dblq to q by truncating towards zero */
4010 q
= (signed long long int)(-dblq
);
4012 q
= (signed long long int)dblq
;
4014 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4015 /* (C0,C3,C1) <-- (q2,q1,q0) */
4016 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4017 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4018 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4020 int N
= 32 + (expdif
% 32); /* as per AMD docs */
4021 env
->fpus
|= 0x400; /* C2 <-- 1 */
4022 fptemp
= pow(2.0, (double)(expdif
- N
));
4023 fpsrcop
= (ST0
/ ST1
) / fptemp
;
4024 /* fpsrcop = integer obtained by chopping */
4025 fpsrcop
= (fpsrcop
< 0.0) ?
4026 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4027 ST0
-= (ST1
* fpsrcop
* fptemp
);
4031 void helper_fyl2xp1(void)
4033 CPU86_LDouble fptemp
;
4036 if ((fptemp
+1.0)>0.0) {
4037 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
4041 env
->fpus
&= (~0x4700);
4046 void helper_fsqrt(void)
4048 CPU86_LDouble fptemp
;
4052 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4058 void helper_fsincos(void)
4060 CPU86_LDouble fptemp
;
4063 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4069 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4070 /* the above code is for |arg| < 2**63 only */
/* FRNDINT: round ST0 to an integral value using the rounding mode
   currently configured in env->fp_status. */
void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}
/* FSCALE: ST0 = ST0 * 2^trunc(ST1), via the host ldexp(); the (int)
   cast truncates ST1 toward zero. */
void helper_fscale(void)
{
    ST0 = ldexp(ST0, (int)(ST1));
}
4084 void helper_fsin(void)
4086 CPU86_LDouble fptemp
;
4089 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4093 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4094 /* the above code is for |arg| < 2**53 only */
4098 void helper_fcos(void)
4100 CPU86_LDouble fptemp
;
4103 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4107 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4108 /* the above code is for |arg| < 2**63 only */
4112 void helper_fxam_ST0(void)
4114 CPU86_LDoubleU temp
;
4119 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4121 env
->fpus
|= 0x200; /* C1 <-- 1 */
4123 /* XXX: test fptags too */
4124 expdif
= EXPD(temp
);
4125 if (expdif
== MAXEXPD
) {
4126 #ifdef USE_X86LDOUBLE
4127 if (MANTD(temp
) == 0x8000000000000000ULL
)
4129 if (MANTD(temp
) == 0)
4131 env
->fpus
|= 0x500 /*Infinity*/;
4133 env
->fpus
|= 0x100 /*NaN*/;
4134 } else if (expdif
== 0) {
4135 if (MANTD(temp
) == 0)
4136 env
->fpus
|= 0x4000 /*Zero*/;
4138 env
->fpus
|= 0x4400 /*Denormal*/;
4144 void helper_fstenv(target_ulong ptr
, int data32
)
4146 int fpus
, fptag
, exp
, i
;
4150 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4152 for (i
=7; i
>=0; i
--) {
4154 if (env
->fptags
[i
]) {
4157 tmp
.d
= env
->fpregs
[i
].d
;
4160 if (exp
== 0 && mant
== 0) {
4163 } else if (exp
== 0 || exp
== MAXEXPD
4164 #ifdef USE_X86LDOUBLE
4165 || (mant
& (1LL << 63)) == 0
4168 /* NaNs, infinity, denormal */
4175 stl(ptr
, env
->fpuc
);
4177 stl(ptr
+ 8, fptag
);
4178 stl(ptr
+ 12, 0); /* fpip */
4179 stl(ptr
+ 16, 0); /* fpcs */
4180 stl(ptr
+ 20, 0); /* fpoo */
4181 stl(ptr
+ 24, 0); /* fpos */
4184 stw(ptr
, env
->fpuc
);
4186 stw(ptr
+ 4, fptag
);
4194 void helper_fldenv(target_ulong ptr
, int data32
)
4199 env
->fpuc
= lduw(ptr
);
4200 fpus
= lduw(ptr
+ 4);
4201 fptag
= lduw(ptr
+ 8);
4204 env
->fpuc
= lduw(ptr
);
4205 fpus
= lduw(ptr
+ 2);
4206 fptag
= lduw(ptr
+ 4);
4208 env
->fpstt
= (fpus
>> 11) & 7;
4209 env
->fpus
= fpus
& ~0x3800;
4210 for(i
= 0;i
< 8; i
++) {
4211 env
->fptags
[i
] = ((fptag
& 3) == 3);
4216 void helper_fsave(target_ulong ptr
, int data32
)
4221 helper_fstenv(ptr
, data32
);
4223 ptr
+= (14 << data32
);
4224 for(i
= 0;i
< 8; i
++) {
4226 helper_fstt(tmp
, ptr
);
4244 void helper_frstor(target_ulong ptr
, int data32
)
4249 helper_fldenv(ptr
, data32
);
4250 ptr
+= (14 << data32
);
4252 for(i
= 0;i
< 8; i
++) {
4253 tmp
= helper_fldt(ptr
);
4259 void helper_fxsave(target_ulong ptr
, int data64
)
4261 int fpus
, fptag
, i
, nb_xmm_regs
;
4265 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4267 for(i
= 0; i
< 8; i
++) {
4268 fptag
|= (env
->fptags
[i
] << i
);
4270 stw(ptr
, env
->fpuc
);
4272 stw(ptr
+ 4, fptag
^ 0xff);
4273 #ifdef TARGET_X86_64
4275 stq(ptr
+ 0x08, 0); /* rip */
4276 stq(ptr
+ 0x10, 0); /* rdp */
4280 stl(ptr
+ 0x08, 0); /* eip */
4281 stl(ptr
+ 0x0c, 0); /* sel */
4282 stl(ptr
+ 0x10, 0); /* dp */
4283 stl(ptr
+ 0x14, 0); /* sel */
4287 for(i
= 0;i
< 8; i
++) {
4289 helper_fstt(tmp
, addr
);
4293 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4294 /* XXX: finish it */
4295 stl(ptr
+ 0x18, env
->mxcsr
); /* mxcsr */
4296 stl(ptr
+ 0x1c, 0x0000ffff); /* mxcsr_mask */
4297 if (env
->hflags
& HF_CS64_MASK
)
4302 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4303 stq(addr
, env
->xmm_regs
[i
].XMM_Q(0));
4304 stq(addr
+ 8, env
->xmm_regs
[i
].XMM_Q(1));
4310 void helper_fxrstor(target_ulong ptr
, int data64
)
4312 int i
, fpus
, fptag
, nb_xmm_regs
;
4316 env
->fpuc
= lduw(ptr
);
4317 fpus
= lduw(ptr
+ 2);
4318 fptag
= lduw(ptr
+ 4);
4319 env
->fpstt
= (fpus
>> 11) & 7;
4320 env
->fpus
= fpus
& ~0x3800;
4322 for(i
= 0;i
< 8; i
++) {
4323 env
->fptags
[i
] = ((fptag
>> i
) & 1);
4327 for(i
= 0;i
< 8; i
++) {
4328 tmp
= helper_fldt(addr
);
4333 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4334 /* XXX: finish it */
4335 env
->mxcsr
= ldl(ptr
+ 0x18);
4337 if (env
->hflags
& HF_CS64_MASK
)
4342 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4343 env
->xmm_regs
[i
].XMM_Q(0) = ldq(addr
);
4344 env
->xmm_regs
[i
].XMM_Q(1) = ldq(addr
+ 8);
4350 #ifndef USE_X86LDOUBLE
4352 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4354 CPU86_LDoubleU temp
;
4359 *pmant
= (MANTD(temp
) << 11) | (1LL << 63);
4360 /* exponent + sign */
4361 e
= EXPD(temp
) - EXPBIAS
+ 16383;
4362 e
|= SIGND(temp
) >> 16;
4366 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4368 CPU86_LDoubleU temp
;
4372 /* XXX: handle overflow ? */
4373 e
= (upper
& 0x7fff) - 16383 + EXPBIAS
; /* exponent */
4374 e
|= (upper
>> 4) & 0x800; /* sign */
4375 ll
= (mant
>> 11) & ((1LL << 52) - 1);
4377 temp
.l
.upper
= (e
<< 20) | (ll
>> 32);
4380 temp
.ll
= ll
| ((uint64_t)e
<< 52);
4387 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4389 CPU86_LDoubleU temp
;
4392 *pmant
= temp
.l
.lower
;
4393 *pexp
= temp
.l
.upper
;
4396 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4398 CPU86_LDoubleU temp
;
4400 temp
.l
.upper
= upper
;
4401 temp
.l
.lower
= mant
;
4406 #ifdef TARGET_X86_64
4408 //#define DEBUG_MULDIV
4410 static void add128(uint64_t *plow
, uint64_t *phigh
, uint64_t a
, uint64_t b
)
4419 static void neg128(uint64_t *plow
, uint64_t *phigh
)
4423 add128(plow
, phigh
, 1, 0);
4426 /* return TRUE if overflow */
4427 static int div64(uint64_t *plow
, uint64_t *phigh
, uint64_t b
)
4429 uint64_t q
, r
, a1
, a0
;
4442 /* XXX: use a better algorithm */
4443 for(i
= 0; i
< 64; i
++) {
4445 a1
= (a1
<< 1) | (a0
>> 63);
4446 if (ab
|| a1
>= b
) {
4452 a0
= (a0
<< 1) | qb
;
4454 #if defined(DEBUG_MULDIV)
4455 printf("div: 0x%016" PRIx64
"%016" PRIx64
" / 0x%016" PRIx64
": q=0x%016" PRIx64
" r=0x%016" PRIx64
"\n",
4456 *phigh
, *plow
, b
, a0
, a1
);
4464 /* return TRUE if overflow */
4465 static int idiv64(uint64_t *plow
, uint64_t *phigh
, int64_t b
)
4468 sa
= ((int64_t)*phigh
< 0);
4470 neg128(plow
, phigh
);
4474 if (div64(plow
, phigh
, b
) != 0)
4477 if (*plow
> (1ULL << 63))
4481 if (*plow
>= (1ULL << 63))
4489 void helper_mulq_EAX_T0(target_ulong t0
)
4493 mulu64(&r0
, &r1
, EAX
, t0
);
4500 void helper_imulq_EAX_T0(target_ulong t0
)
4504 muls64(&r0
, &r1
, EAX
, t0
);
4508 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4511 target_ulong
helper_imulq_T0_T1(target_ulong t0
, target_ulong t1
)
4515 muls64(&r0
, &r1
, t0
, t1
);
4517 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4521 void helper_divq_EAX(target_ulong t0
)
4525 raise_exception(EXCP00_DIVZ
);
4529 if (div64(&r0
, &r1
, t0
))
4530 raise_exception(EXCP00_DIVZ
);
4535 void helper_idivq_EAX(target_ulong t0
)
4539 raise_exception(EXCP00_DIVZ
);
4543 if (idiv64(&r0
, &r1
, t0
))
4544 raise_exception(EXCP00_DIVZ
);
4550 void helper_hlt(void)
4552 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
; /* needed if sti is just before */
4553 env
->hflags
|= HF_HALTED_MASK
;
4554 env
->exception_index
= EXCP_HLT
;
/* MONITOR instruction: only the ECX == 0 extension form is accepted;
   the monitored address 'ptr' itself is not recorded. */
void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}
4565 void helper_mwait(void)
4567 if ((uint32_t)ECX
!= 0)
4568 raise_exception(EXCP0D_GPF
);
4569 /* XXX: not complete but not completely erroneous */
4570 if (env
->cpu_index
!= 0 || env
->next_cpu
!= NULL
) {
4571 /* more than one CPU: do not sleep because another CPU may
4578 void helper_debug(void)
4580 env
->exception_index
= EXCP_DEBUG
;
/* Software interrupt (INT n): is_int=1, error_code=0; next_eip_addend is
 * the length of the INT instruction so the return EIP can be computed. */
void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}
/* Trampoline used by generated code to raise a CPU exception. */
void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}
4594 void helper_cli(void)
4596 env
->eflags
&= ~IF_MASK
;
4599 void helper_sti(void)
4601 env
->eflags
|= IF_MASK
;
4605 /* vm86plus instructions */
4606 void helper_cli_vm(void)
4608 env
->eflags
&= ~VIF_MASK
;
4611 void helper_sti_vm(void)
4613 env
->eflags
|= VIF_MASK
;
4614 if (env
->eflags
& VIP_MASK
) {
4615 raise_exception(EXCP0D_GPF
);
4620 void helper_set_inhibit_irq(void)
4622 env
->hflags
|= HF_INHIBIT_IRQ_MASK
;
4625 void helper_reset_inhibit_irq(void)
4627 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
4630 void helper_boundw(target_ulong a0
, int v
)
4634 high
= ldsw(a0
+ 2);
4636 if (v
< low
|| v
> high
) {
4637 raise_exception(EXCP05_BOUND
);
4642 void helper_boundl(target_ulong a0
, int v
)
4647 if (v
< low
|| v
> high
) {
4648 raise_exception(EXCP05_BOUND
);
4653 static float approx_rsqrt(float a
)
4655 return 1.0 / sqrt(a
);
/* RCPSS-style approximation: reciprocal computed in double precision and
 * rounded back to float.
 * NOTE(review): body line was missing from the extracted source;
 * restored — verify against upstream. */
static float approx_rcp(float a)
{
    return 1.0 / a;
}
4663 #if !defined(CONFIG_USER_ONLY)
4665 #define MMUSUFFIX _mmu
4668 #include "softmmu_template.h"
4671 #include "softmmu_template.h"
4674 #include "softmmu_template.h"
4677 #include "softmmu_template.h"
4681 /* try to fill the TLB and return an exception if error. If retaddr is
4682 NULL, it means that the function was called in C code (i.e. not
4683 from generated code or from helper.c) */
4684 /* XXX: fix it to restore all registers */
4685 void tlb_fill(target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
4687 TranslationBlock
*tb
;
4690 CPUX86State
*saved_env
;
4692 /* XXX: hack to restore env in all cases, even if not called from
4695 env
= cpu_single_env
;
4697 ret
= cpu_x86_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
4700 /* now we have a real cpu fault */
4701 pc
= (unsigned long)retaddr
;
4702 tb
= tb_find_pc(pc
);
4704 /* the PC is inside the translated code. It means that we have
4705 a virtual CPU fault */
4706 cpu_restore_state(tb
, env
, pc
, NULL
);
4710 raise_exception_err(env
->exception_index
, env
->error_code
);
4712 raise_exception_err_norestore(env
->exception_index
, env
->error_code
);
4718 /* Secure Virtual Machine helpers */
4720 void helper_stgi(void)
4722 env
->hflags
|= HF_GIF_MASK
;
4725 void helper_clgi(void)
4727 env
->hflags
&= ~HF_GIF_MASK
;
4730 #if defined(CONFIG_USER_ONLY)
4732 void helper_vmrun(void)
4735 void helper_vmmcall(void)
4738 void helper_vmload(void)
4741 void helper_vmsave(void)
4744 void helper_skinit(void)
4747 void helper_invlpga(void)
4750 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
4753 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
4757 void helper_svm_check_io(uint32_t port
, uint32_t param
,
4758 uint32_t next_eip_addend
)
/* Expand a packed 12-bit VMCB segment attribute field into the CPU's
 * 32-bit hidden-descriptor attribute layout, merging in the bits of
 * base and limit that live in the descriptor's high word. */
static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    uint32_t attrib_low  = ((uint32_t)(vmcb_attrib & 0x00ff)) << 8;  /* Type, S, DPL, P */
    uint32_t attrib_high = ((uint32_t)(vmcb_attrib & 0x0f00)) << 12; /* AVL, L, DB, G */
    uint32_t base_23_16  = (vmcb_base >> 16) & 0xff;                 /* Base 23-16 */
    uint32_t base_31_24  = vmcb_base & 0xff000000;                   /* Base 31-24 */
    uint32_t limit_19_16 = vmcb_limit & 0xf0000;                     /* Limit 19-16 */

    return attrib_low | attrib_high | base_23_16 | base_31_24 | limit_19_16;
}
/* Pack the CPU's 32-bit hidden-descriptor attribute word back into the
 * 12-bit VMCB segment attribute encoding (inverse of vmcb2cpu_attrib
 * for the attribute bits; base/limit bits are handled separately). */
static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    uint16_t type_s_dpl_p = (cpu_attrib >> 8) & 0xff;        /* Type, S, DPL, P */
    uint16_t avl_l_db_g   = (cpu_attrib & 0xf00000) >> 12;   /* AVL, L, DB, G */

    return type_s_dpl_p | avl_l_db_g;
}
4779 void helper_vmrun(void)
4786 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4787 fprintf(logfile
,"vmrun! " TARGET_FMT_lx
"\n", addr
);
4789 env
->vm_vmcb
= addr
;
4791 /* save the current CPU state in the hsave page */
4792 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
4793 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4795 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4796 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4798 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4799 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4800 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4801 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4802 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr8
), env
->cr
[8]);
4803 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4804 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4806 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4807 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4809 SVM_SAVE_SEG(env
->vm_hsave
, segs
[R_ES
], es
);
4810 SVM_SAVE_SEG(env
->vm_hsave
, segs
[R_CS
], cs
);
4811 SVM_SAVE_SEG(env
->vm_hsave
, segs
[R_SS
], ss
);
4812 SVM_SAVE_SEG(env
->vm_hsave
, segs
[R_DS
], ds
);
4814 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
), EIP
);
4815 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4816 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4818 /* load the interception bitmaps so we do not need to access the
4820 /* We shift all the intercept bits so we can OR them with the TB
4822 env
->intercept
= (ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
)) << INTERCEPT_INTR
) | INTERCEPT_SVM_MASK
;
4823 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
4824 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
4825 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_read
));
4826 env
->intercept_dr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_write
));
4827 env
->intercept_exceptions
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_exceptions
));
4829 env
->gdt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
));
4830 env
->gdt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
4832 env
->idt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
));
4833 env
->idt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
));
4835 /* clear exit_info_2 so we behave like the real hardware */
4836 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
4838 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
)));
4839 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
)));
4840 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
)));
4841 env
->cr
[2] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
4842 int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
4843 if (int_ctl
& V_INTR_MASKING_MASK
) {
4844 env
->cr
[8] = int_ctl
& V_TPR_MASK
;
4845 cpu_set_apic_tpr(env
, env
->cr
[8]);
4846 if (env
->eflags
& IF_MASK
)
4847 env
->hflags
|= HF_HIF_MASK
;
4850 #ifdef TARGET_X86_64
4851 env
->efer
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
));
4852 env
->hflags
&= ~HF_LMA_MASK
;
4853 if (env
->efer
& MSR_EFER_LMA
)
4854 env
->hflags
|= HF_LMA_MASK
;
4857 load_eflags(ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
)),
4858 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
4859 CC_OP
= CC_OP_EFLAGS
;
4860 CC_DST
= 0xffffffff;
4862 SVM_LOAD_SEG(env
->vm_vmcb
, ES
, es
);
4863 SVM_LOAD_SEG(env
->vm_vmcb
, CS
, cs
);
4864 SVM_LOAD_SEG(env
->vm_vmcb
, SS
, ss
);
4865 SVM_LOAD_SEG(env
->vm_vmcb
, DS
, ds
);
4867 EIP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
4869 ESP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
4870 EAX
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
4871 env
->dr
[7] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
4872 env
->dr
[6] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
4873 cpu_x86_set_cpl(env
, ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
)));
4875 /* FIXME: guest state consistency checks */
4877 switch(ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
4878 case TLB_CONTROL_DO_NOTHING
:
4880 case TLB_CONTROL_FLUSH_ALL_ASID
:
4881 /* FIXME: this is not 100% correct but should work for now */
4888 /* maybe we need to inject an event */
4889 event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
4890 if (event_inj
& SVM_EVTINJ_VALID
) {
4891 uint8_t vector
= event_inj
& SVM_EVTINJ_VEC_MASK
;
4892 uint16_t valid_err
= event_inj
& SVM_EVTINJ_VALID_ERR
;
4893 uint32_t event_inj_err
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
));
4894 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), event_inj
& ~SVM_EVTINJ_VALID
);
4896 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4897 fprintf(logfile
, "Injecting(%#hx): ", valid_err
);
4898 /* FIXME: need to implement valid_err */
4899 switch (event_inj
& SVM_EVTINJ_TYPE_MASK
) {
4900 case SVM_EVTINJ_TYPE_INTR
:
4901 env
->exception_index
= vector
;
4902 env
->error_code
= event_inj_err
;
4903 env
->exception_is_int
= 0;
4904 env
->exception_next_eip
= -1;
4905 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4906 fprintf(logfile
, "INTR");
4908 case SVM_EVTINJ_TYPE_NMI
:
4909 env
->exception_index
= vector
;
4910 env
->error_code
= event_inj_err
;
4911 env
->exception_is_int
= 0;
4912 env
->exception_next_eip
= EIP
;
4913 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4914 fprintf(logfile
, "NMI");
4916 case SVM_EVTINJ_TYPE_EXEPT
:
4917 env
->exception_index
= vector
;
4918 env
->error_code
= event_inj_err
;
4919 env
->exception_is_int
= 0;
4920 env
->exception_next_eip
= -1;
4921 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4922 fprintf(logfile
, "EXEPT");
4924 case SVM_EVTINJ_TYPE_SOFT
:
4925 env
->exception_index
= vector
;
4926 env
->error_code
= event_inj_err
;
4927 env
->exception_is_int
= 1;
4928 env
->exception_next_eip
= EIP
;
4929 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4930 fprintf(logfile
, "SOFT");
4933 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4934 fprintf(logfile
, " %#x %#x\n", env
->exception_index
, env
->error_code
);
4936 if ((int_ctl
& V_IRQ_MASK
) || (env
->intercept
& INTERCEPT_VINTR
)) {
4937 env
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
4943 void helper_vmmcall(void)
4945 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4946 fprintf(logfile
,"vmmcall!\n");
4949 void helper_vmload(void)
4953 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4954 fprintf(logfile
,"vmload! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
4955 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
4956 env
->segs
[R_FS
].base
);
4958 SVM_LOAD_SEG2(addr
, segs
[R_FS
], fs
);
4959 SVM_LOAD_SEG2(addr
, segs
[R_GS
], gs
);
4960 SVM_LOAD_SEG2(addr
, tr
, tr
);
4961 SVM_LOAD_SEG2(addr
, ldt
, ldtr
);
4963 #ifdef TARGET_X86_64
4964 env
->kernelgsbase
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
));
4965 env
->lstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
));
4966 env
->cstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
));
4967 env
->fmask
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
));
4969 env
->star
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.star
));
4970 env
->sysenter_cs
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
));
4971 env
->sysenter_esp
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
));
4972 env
->sysenter_eip
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
));
4975 void helper_vmsave(void)
4979 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4980 fprintf(logfile
,"vmsave! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
4981 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
4982 env
->segs
[R_FS
].base
);
4984 SVM_SAVE_SEG(addr
, segs
[R_FS
], fs
);
4985 SVM_SAVE_SEG(addr
, segs
[R_GS
], gs
);
4986 SVM_SAVE_SEG(addr
, tr
, tr
);
4987 SVM_SAVE_SEG(addr
, ldt
, ldtr
);
4989 #ifdef TARGET_X86_64
4990 stq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
), env
->kernelgsbase
);
4991 stq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
), env
->lstar
);
4992 stq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
), env
->cstar
);
4993 stq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
), env
->fmask
);
4995 stq_phys(addr
+ offsetof(struct vmcb
, save
.star
), env
->star
);
4996 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
), env
->sysenter_cs
);
4997 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
), env
->sysenter_esp
);
4998 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
), env
->sysenter_eip
);
5001 void helper_skinit(void)
5003 if (loglevel
& CPU_LOG_TB_IN_ASM
)
5004 fprintf(logfile
,"skinit!\n");
5007 void helper_invlpga(void)
5012 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
5015 case SVM_EXIT_READ_CR0
... SVM_EXIT_READ_CR0
+ 8:
5016 if (INTERCEPTEDw(_cr_read
, (1 << (type
- SVM_EXIT_READ_CR0
)))) {
5017 helper_vmexit(type
, param
);
5020 case SVM_EXIT_READ_DR0
... SVM_EXIT_READ_DR0
+ 8:
5021 if (INTERCEPTEDw(_dr_read
, (1 << (type
- SVM_EXIT_READ_DR0
)))) {
5022 helper_vmexit(type
, param
);
5025 case SVM_EXIT_WRITE_CR0
... SVM_EXIT_WRITE_CR0
+ 8:
5026 if (INTERCEPTEDw(_cr_write
, (1 << (type
- SVM_EXIT_WRITE_CR0
)))) {
5027 helper_vmexit(type
, param
);
5030 case SVM_EXIT_WRITE_DR0
... SVM_EXIT_WRITE_DR0
+ 8:
5031 if (INTERCEPTEDw(_dr_write
, (1 << (type
- SVM_EXIT_WRITE_DR0
)))) {
5032 helper_vmexit(type
, param
);
5035 case SVM_EXIT_EXCP_BASE
... SVM_EXIT_EXCP_BASE
+ 16:
5036 if (INTERCEPTEDl(_exceptions
, (1 << (type
- SVM_EXIT_EXCP_BASE
)))) {
5037 helper_vmexit(type
, param
);
5044 if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT
)) {
5045 /* FIXME: this should be read in at vmrun (faster this way?) */
5046 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.msrpm_base_pa
));
5048 switch((uint32_t)ECX
) {
5053 case 0xc0000000 ... 0xc0001fff:
5054 t0
= (8192 + ECX
- 0xc0000000) * 2;
5058 case 0xc0010000 ... 0xc0011fff:
5059 t0
= (16384 + ECX
- 0xc0010000) * 2;
5064 helper_vmexit(type
, param
);
5069 if (ldub_phys(addr
+ t1
) & ((1 << param
) << t0
))
5070 helper_vmexit(type
, param
);
5074 if (INTERCEPTED((1ULL << ((type
- SVM_EXIT_INTR
) + INTERCEPT_INTR
)))) {
5075 helper_vmexit(type
, param
);
5081 void helper_svm_check_io(uint32_t port
, uint32_t param
,
5082 uint32_t next_eip_addend
)
5084 if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT
)) {
5085 /* FIXME: this should be read in at vmrun (faster this way?) */
5086 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.iopm_base_pa
));
5087 uint16_t mask
= (1 << ((param
>> 4) & 7)) - 1;
5088 if(lduw_phys(addr
+ port
/ 8) & (mask
<< (port
& 7))) {
5090 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
),
5091 env
->eip
+ next_eip_addend
);
5092 helper_vmexit(SVM_EXIT_IOIO
, param
| (port
<< 16));
5097 /* Note: currently only 32 bits of exit_code are used */
5098 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
5102 if (loglevel
& CPU_LOG_TB_IN_ASM
)
5103 fprintf(logfile
,"vmexit(%08x, %016" PRIx64
", %016" PRIx64
", " TARGET_FMT_lx
")!\n",
5104 exit_code
, exit_info_1
,
5105 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
)),
5108 if(env
->hflags
& HF_INHIBIT_IRQ_MASK
) {
5109 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), SVM_INTERRUPT_SHADOW_MASK
);
5110 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
5112 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), 0);
5115 /* Save the VM state in the vmcb */
5116 SVM_SAVE_SEG(env
->vm_vmcb
, segs
[R_ES
], es
);
5117 SVM_SAVE_SEG(env
->vm_vmcb
, segs
[R_CS
], cs
);
5118 SVM_SAVE_SEG(env
->vm_vmcb
, segs
[R_SS
], ss
);
5119 SVM_SAVE_SEG(env
->vm_vmcb
, segs
[R_DS
], ds
);
5121 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
5122 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
5124 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
5125 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
5127 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
5128 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
5129 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
5130 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
5131 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
5133 if ((int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
))) & V_INTR_MASKING_MASK
) {
5134 int_ctl
&= ~V_TPR_MASK
;
5135 int_ctl
|= env
->cr
[8] & V_TPR_MASK
;
5136 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
), int_ctl
);
5139 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
5140 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
), env
->eip
);
5141 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
5142 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
), EAX
);
5143 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
5144 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
5145 stb_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
), env
->hflags
& HF_CPL_MASK
);
5147 /* Reload the host state from vm_hsave */
5148 env
->hflags
&= ~HF_HIF_MASK
;
5150 env
->intercept_exceptions
= 0;
5151 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
5153 env
->gdt
.base
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
));
5154 env
->gdt
.limit
= ldl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
5156 env
->idt
.base
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
));
5157 env
->idt
.limit
= ldl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
));
5159 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
)) | CR0_PE_MASK
);
5160 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
)));
5161 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
)));
5162 if (int_ctl
& V_INTR_MASKING_MASK
) {
5163 env
->cr
[8] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr8
));
5164 cpu_set_apic_tpr(env
, env
->cr
[8]);
5166 /* we need to set the efer after the crs so the hidden flags get set properly */
5167 #ifdef TARGET_X86_64
5168 env
->efer
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
));
5169 env
->hflags
&= ~HF_LMA_MASK
;
5170 if (env
->efer
& MSR_EFER_LMA
)
5171 env
->hflags
|= HF_LMA_MASK
;
5175 load_eflags(ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
)),
5176 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
5177 CC_OP
= CC_OP_EFLAGS
;
5179 SVM_LOAD_SEG(env
->vm_hsave
, ES
, es
);
5180 SVM_LOAD_SEG(env
->vm_hsave
, CS
, cs
);
5181 SVM_LOAD_SEG(env
->vm_hsave
, SS
, ss
);
5182 SVM_LOAD_SEG(env
->vm_hsave
, DS
, ds
);
5184 EIP
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
));
5185 ESP
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
));
5186 EAX
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
));
5188 env
->dr
[6] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
));
5189 env
->dr
[7] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
));
5192 cpu_x86_set_cpl(env
, 0);
5193 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_code
), exit_code
);
5194 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_1
), exit_info_1
);
5197 /* FIXME: Resets the current ASID register to zero (host ASID). */
5199 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5201 /* Clears the TSC_OFFSET inside the processor. */
5203 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5204 from the page table indicated the host's CR3. If the PDPEs contain
5205 illegal state, the processor causes a shutdown. */
5207 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5208 env
->cr
[0] |= CR0_PE_MASK
;
5209 env
->eflags
&= ~VM_MASK
;
5211 /* Disables all breakpoints in the host DR7 register. */
5213 /* Checks the reloaded host state for consistency. */
5215 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5216 host's code segment or non-canonical (in the case of long mode), a
5217 #GP fault is delivered inside the host.) */
5219 /* remove any pending exception */
5220 env
->exception_index
= -1;
5221 env
->error_code
= 0;
5222 env
->old_exception
= -1;
5230 /* XXX: optimize by storing fptt and fptags in the static cpu state */
5231 void helper_enter_mmx(void)
5234 *(uint32_t *)(env
->fptags
) = 0;
5235 *(uint32_t *)(env
->fptags
+ 4) = 0;
5238 void helper_emms(void)
5240 /* set to empty state */
5241 *(uint32_t *)(env
->fptags
) = 0x01010101;
5242 *(uint32_t *)(env
->fptags
+ 4) = 0x01010101;
/* MOVQ: copy one 64-bit MMX/XMM lane from *s to *d.
 * NOTE(review): body line was missing from the extracted source;
 * restored — verify. */
void helper_movq(uint64_t *d, uint64_t *s)
{
    *d = *s;
}
5252 #include "ops_sse.h"
5255 #include "ops_sse.h"
5258 #include "helper_template.h"
5262 #include "helper_template.h"
5266 #include "helper_template.h"
5269 #ifdef TARGET_X86_64
5272 #include "helper_template.h"
5277 /* bit operations */
5278 target_ulong
helper_bsf(target_ulong t0
)
5285 while ((res
& 1) == 0) {
5292 target_ulong
helper_bsr(target_ulong t0
)
5295 target_ulong res
, mask
;
5298 count
= TARGET_LONG_BITS
- 1;
5299 mask
= (target_ulong
)1 << (TARGET_LONG_BITS
- 1);
5300 while ((res
& mask
) == 0) {
5308 static int compute_all_eflags(void)
5313 static int compute_c_eflags(void)
5315 return CC_SRC
& CC_C
;
5318 CCTable cc_table
[CC_OP_NB
] = {
5319 [CC_OP_DYNAMIC
] = { /* should never happen */ },
5321 [CC_OP_EFLAGS
] = { compute_all_eflags
, compute_c_eflags
},
5323 [CC_OP_MULB
] = { compute_all_mulb
, compute_c_mull
},
5324 [CC_OP_MULW
] = { compute_all_mulw
, compute_c_mull
},
5325 [CC_OP_MULL
] = { compute_all_mull
, compute_c_mull
},
5327 [CC_OP_ADDB
] = { compute_all_addb
, compute_c_addb
},
5328 [CC_OP_ADDW
] = { compute_all_addw
, compute_c_addw
},
5329 [CC_OP_ADDL
] = { compute_all_addl
, compute_c_addl
},
5331 [CC_OP_ADCB
] = { compute_all_adcb
, compute_c_adcb
},
5332 [CC_OP_ADCW
] = { compute_all_adcw
, compute_c_adcw
},
5333 [CC_OP_ADCL
] = { compute_all_adcl
, compute_c_adcl
},
5335 [CC_OP_SUBB
] = { compute_all_subb
, compute_c_subb
},
5336 [CC_OP_SUBW
] = { compute_all_subw
, compute_c_subw
},
5337 [CC_OP_SUBL
] = { compute_all_subl
, compute_c_subl
},
5339 [CC_OP_SBBB
] = { compute_all_sbbb
, compute_c_sbbb
},
5340 [CC_OP_SBBW
] = { compute_all_sbbw
, compute_c_sbbw
},
5341 [CC_OP_SBBL
] = { compute_all_sbbl
, compute_c_sbbl
},
5343 [CC_OP_LOGICB
] = { compute_all_logicb
, compute_c_logicb
},
5344 [CC_OP_LOGICW
] = { compute_all_logicw
, compute_c_logicw
},
5345 [CC_OP_LOGICL
] = { compute_all_logicl
, compute_c_logicl
},
5347 [CC_OP_INCB
] = { compute_all_incb
, compute_c_incl
},
5348 [CC_OP_INCW
] = { compute_all_incw
, compute_c_incl
},
5349 [CC_OP_INCL
] = { compute_all_incl
, compute_c_incl
},
5351 [CC_OP_DECB
] = { compute_all_decb
, compute_c_incl
},
5352 [CC_OP_DECW
] = { compute_all_decw
, compute_c_incl
},
5353 [CC_OP_DECL
] = { compute_all_decl
, compute_c_incl
},
5355 [CC_OP_SHLB
] = { compute_all_shlb
, compute_c_shlb
},
5356 [CC_OP_SHLW
] = { compute_all_shlw
, compute_c_shlw
},
5357 [CC_OP_SHLL
] = { compute_all_shll
, compute_c_shll
},
5359 [CC_OP_SARB
] = { compute_all_sarb
, compute_c_sarl
},
5360 [CC_OP_SARW
] = { compute_all_sarw
, compute_c_sarl
},
5361 [CC_OP_SARL
] = { compute_all_sarl
, compute_c_sarl
},
5363 #ifdef TARGET_X86_64
5364 [CC_OP_MULQ
] = { compute_all_mulq
, compute_c_mull
},
5366 [CC_OP_ADDQ
] = { compute_all_addq
, compute_c_addq
},
5368 [CC_OP_ADCQ
] = { compute_all_adcq
, compute_c_adcq
},
5370 [CC_OP_SUBQ
] = { compute_all_subq
, compute_c_subq
},
5372 [CC_OP_SBBQ
] = { compute_all_sbbq
, compute_c_sbbq
},
5374 [CC_OP_LOGICQ
] = { compute_all_logicq
, compute_c_logicq
},
5376 [CC_OP_INCQ
] = { compute_all_incq
, compute_c_incl
},
5378 [CC_OP_DECQ
] = { compute_all_decq
, compute_c_incl
},
5380 [CC_OP_SHLQ
] = { compute_all_shlq
, compute_c_shlq
},
5382 [CC_OP_SARQ
] = { compute_all_sarq
, compute_c_sarl
},