/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
20 #define CPU_NO_GLOBAL_REGS
22 #include "host-utils.h"
/* Disabled debug hook: when enabled, wraps raise_exception_err() so every
   raised exception logs the source line first.  The parenthesized call
   bypasses this macro and reaches the real function. */
#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
35 const uint8_t parity_table
[256] = {
36 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
37 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
38 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
39 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
40 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
41 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
42 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
43 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
44 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
45 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
46 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
47 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
48 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
49 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
50 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
51 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
52 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
53 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
54 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
55 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
56 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
57 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
58 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
59 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
60 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
61 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
62 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
63 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
64 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
65 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
66 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
67 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
/* RCL/RCR through carry on 16-bit operands rotate modulo 17;
   this table maps a 5-bit count to count % 17. */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};
/* RCL/RCR through carry on 8-bit operands rotate modulo 9;
   this table maps a 5-bit count to count % 9. */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
86 const CPU86_LDouble f15rk
[7] =
88 0.00000000000000000000L,
89 1.00000000000000000000L,
90 3.14159265358979323851L, /*pi*/
91 0.30102999566398119523L, /*lg2*/
92 0.69314718055994530943L, /*ln2*/
93 1.44269504088896340739L, /*l2e*/
94 3.32192809488736234781L, /*l2t*/
97 /* broken thread support */
99 spinlock_t global_cpu_lock
= SPIN_LOCK_UNLOCKED
;
101 void helper_lock(void)
103 spin_lock(&global_cpu_lock
);
106 void helper_unlock(void)
108 spin_unlock(&global_cpu_lock
);
111 void helper_write_eflags(target_ulong t0
, uint32_t update_mask
)
113 load_eflags(t0
, update_mask
);
116 target_ulong
helper_read_eflags(void)
119 eflags
= cc_table
[CC_OP
].compute_all();
120 eflags
|= (DF
& DF_MASK
);
121 eflags
|= env
->eflags
& ~(VM_MASK
| RF_MASK
);
125 /* return non zero if error */
126 static inline int load_segment(uint32_t *e1_ptr
, uint32_t *e2_ptr
,
137 index
= selector
& ~7;
138 if ((index
+ 7) > dt
->limit
)
140 ptr
= dt
->base
+ index
;
141 *e1_ptr
= ldl_kernel(ptr
);
142 *e2_ptr
= ldl_kernel(ptr
+ 4);
146 static inline unsigned int get_seg_limit(uint32_t e1
, uint32_t e2
)
149 limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
150 if (e2
& DESC_G_MASK
)
151 limit
= (limit
<< 12) | 0xfff;
/* Reassemble the 32-bit segment base scattered across descriptor
   words e1 (bits 31..16) and e2 (bits 7..0 and 31..24). */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
160 static inline void load_seg_cache_raw_dt(SegmentCache
*sc
, uint32_t e1
, uint32_t e2
)
162 sc
->base
= get_seg_base(e1
, e2
);
163 sc
->limit
= get_seg_limit(e1
, e2
);
167 /* init the segment cache in vm86 mode. */
168 static inline void load_seg_vm(int seg
, int selector
)
171 cpu_x86_load_seg_cache(env
, seg
, selector
,
172 (selector
<< 4), 0xffff, 0);
175 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr
,
176 uint32_t *esp_ptr
, int dpl
)
178 int type
, index
, shift
;
183 printf("TR: base=%p limit=%x\n", env
->tr
.base
, env
->tr
.limit
);
184 for(i
=0;i
<env
->tr
.limit
;i
++) {
185 printf("%02x ", env
->tr
.base
[i
]);
186 if ((i
& 7) == 7) printf("\n");
192 if (!(env
->tr
.flags
& DESC_P_MASK
))
193 cpu_abort(env
, "invalid tss");
194 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
196 cpu_abort(env
, "invalid tss type");
198 index
= (dpl
* 4 + 2) << shift
;
199 if (index
+ (4 << shift
) - 1 > env
->tr
.limit
)
200 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
202 *esp_ptr
= lduw_kernel(env
->tr
.base
+ index
);
203 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 2);
205 *esp_ptr
= ldl_kernel(env
->tr
.base
+ index
);
206 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 4);
210 /* XXX: merge with load_seg() */
211 static void tss_load_seg(int seg_reg
, int selector
)
216 if ((selector
& 0xfffc) != 0) {
217 if (load_segment(&e1
, &e2
, selector
) != 0)
218 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
219 if (!(e2
& DESC_S_MASK
))
220 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
222 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
223 cpl
= env
->hflags
& HF_CPL_MASK
;
224 if (seg_reg
== R_CS
) {
225 if (!(e2
& DESC_CS_MASK
))
226 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
227 /* XXX: is it correct ? */
229 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
230 if ((e2
& DESC_C_MASK
) && dpl
> rpl
)
231 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
232 } else if (seg_reg
== R_SS
) {
233 /* SS must be writable data */
234 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
235 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
236 if (dpl
!= cpl
|| dpl
!= rpl
)
237 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
239 /* not readable code */
240 if ((e2
& DESC_CS_MASK
) && !(e2
& DESC_R_MASK
))
241 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
242 /* if data or non conforming code, checks the rights */
243 if (((e2
>> DESC_TYPE_SHIFT
) & 0xf) < 12) {
244 if (dpl
< cpl
|| dpl
< rpl
)
245 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
248 if (!(e2
& DESC_P_MASK
))
249 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
250 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
251 get_seg_base(e1
, e2
),
252 get_seg_limit(e1
, e2
),
255 if (seg_reg
== R_SS
|| seg_reg
== R_CS
)
256 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
260 #define SWITCH_TSS_JMP 0
261 #define SWITCH_TSS_IRET 1
262 #define SWITCH_TSS_CALL 2
264 /* XXX: restore CPU state in registers (PowerPC case) */
265 static void switch_tss(int tss_selector
,
266 uint32_t e1
, uint32_t e2
, int source
,
269 int tss_limit
, tss_limit_max
, type
, old_tss_limit_max
, old_type
, v1
, v2
, i
;
270 target_ulong tss_base
;
271 uint32_t new_regs
[8], new_segs
[6];
272 uint32_t new_eflags
, new_eip
, new_cr3
, new_ldt
, new_trap
;
273 uint32_t old_eflags
, eflags_mask
;
278 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
280 if (loglevel
& CPU_LOG_PCALL
)
281 fprintf(logfile
, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector
, type
, source
);
284 /* if task gate, we read the TSS segment and we load it */
286 if (!(e2
& DESC_P_MASK
))
287 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
288 tss_selector
= e1
>> 16;
289 if (tss_selector
& 4)
290 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
291 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
292 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
293 if (e2
& DESC_S_MASK
)
294 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
295 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
297 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
300 if (!(e2
& DESC_P_MASK
))
301 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
307 tss_limit
= get_seg_limit(e1
, e2
);
308 tss_base
= get_seg_base(e1
, e2
);
309 if ((tss_selector
& 4) != 0 ||
310 tss_limit
< tss_limit_max
)
311 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
312 old_type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
314 old_tss_limit_max
= 103;
316 old_tss_limit_max
= 43;
318 /* read all the registers from the new TSS */
321 new_cr3
= ldl_kernel(tss_base
+ 0x1c);
322 new_eip
= ldl_kernel(tss_base
+ 0x20);
323 new_eflags
= ldl_kernel(tss_base
+ 0x24);
324 for(i
= 0; i
< 8; i
++)
325 new_regs
[i
] = ldl_kernel(tss_base
+ (0x28 + i
* 4));
326 for(i
= 0; i
< 6; i
++)
327 new_segs
[i
] = lduw_kernel(tss_base
+ (0x48 + i
* 4));
328 new_ldt
= lduw_kernel(tss_base
+ 0x60);
329 new_trap
= ldl_kernel(tss_base
+ 0x64);
333 new_eip
= lduw_kernel(tss_base
+ 0x0e);
334 new_eflags
= lduw_kernel(tss_base
+ 0x10);
335 for(i
= 0; i
< 8; i
++)
336 new_regs
[i
] = lduw_kernel(tss_base
+ (0x12 + i
* 2)) | 0xffff0000;
337 for(i
= 0; i
< 4; i
++)
338 new_segs
[i
] = lduw_kernel(tss_base
+ (0x22 + i
* 4));
339 new_ldt
= lduw_kernel(tss_base
+ 0x2a);
345 /* NOTE: we must avoid memory exceptions during the task switch,
346 so we make dummy accesses before */
347 /* XXX: it can still fail in some cases, so a bigger hack is
348 necessary to valid the TLB after having done the accesses */
350 v1
= ldub_kernel(env
->tr
.base
);
351 v2
= ldub_kernel(env
->tr
.base
+ old_tss_limit_max
);
352 stb_kernel(env
->tr
.base
, v1
);
353 stb_kernel(env
->tr
.base
+ old_tss_limit_max
, v2
);
355 /* clear busy bit (it is restartable) */
356 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_IRET
) {
359 ptr
= env
->gdt
.base
+ (env
->tr
.selector
& ~7);
360 e2
= ldl_kernel(ptr
+ 4);
361 e2
&= ~DESC_TSS_BUSY_MASK
;
362 stl_kernel(ptr
+ 4, e2
);
364 old_eflags
= compute_eflags();
365 if (source
== SWITCH_TSS_IRET
)
366 old_eflags
&= ~NT_MASK
;
368 /* save the current state in the old TSS */
371 stl_kernel(env
->tr
.base
+ 0x20, next_eip
);
372 stl_kernel(env
->tr
.base
+ 0x24, old_eflags
);
373 stl_kernel(env
->tr
.base
+ (0x28 + 0 * 4), EAX
);
374 stl_kernel(env
->tr
.base
+ (0x28 + 1 * 4), ECX
);
375 stl_kernel(env
->tr
.base
+ (0x28 + 2 * 4), EDX
);
376 stl_kernel(env
->tr
.base
+ (0x28 + 3 * 4), EBX
);
377 stl_kernel(env
->tr
.base
+ (0x28 + 4 * 4), ESP
);
378 stl_kernel(env
->tr
.base
+ (0x28 + 5 * 4), EBP
);
379 stl_kernel(env
->tr
.base
+ (0x28 + 6 * 4), ESI
);
380 stl_kernel(env
->tr
.base
+ (0x28 + 7 * 4), EDI
);
381 for(i
= 0; i
< 6; i
++)
382 stw_kernel(env
->tr
.base
+ (0x48 + i
* 4), env
->segs
[i
].selector
);
385 stw_kernel(env
->tr
.base
+ 0x0e, next_eip
);
386 stw_kernel(env
->tr
.base
+ 0x10, old_eflags
);
387 stw_kernel(env
->tr
.base
+ (0x12 + 0 * 2), EAX
);
388 stw_kernel(env
->tr
.base
+ (0x12 + 1 * 2), ECX
);
389 stw_kernel(env
->tr
.base
+ (0x12 + 2 * 2), EDX
);
390 stw_kernel(env
->tr
.base
+ (0x12 + 3 * 2), EBX
);
391 stw_kernel(env
->tr
.base
+ (0x12 + 4 * 2), ESP
);
392 stw_kernel(env
->tr
.base
+ (0x12 + 5 * 2), EBP
);
393 stw_kernel(env
->tr
.base
+ (0x12 + 6 * 2), ESI
);
394 stw_kernel(env
->tr
.base
+ (0x12 + 7 * 2), EDI
);
395 for(i
= 0; i
< 4; i
++)
396 stw_kernel(env
->tr
.base
+ (0x22 + i
* 4), env
->segs
[i
].selector
);
399 /* now if an exception occurs, it will occurs in the next task
402 if (source
== SWITCH_TSS_CALL
) {
403 stw_kernel(tss_base
, env
->tr
.selector
);
404 new_eflags
|= NT_MASK
;
408 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_CALL
) {
411 ptr
= env
->gdt
.base
+ (tss_selector
& ~7);
412 e2
= ldl_kernel(ptr
+ 4);
413 e2
|= DESC_TSS_BUSY_MASK
;
414 stl_kernel(ptr
+ 4, e2
);
417 /* set the new CPU state */
418 /* from this point, any exception which occurs can give problems */
419 env
->cr
[0] |= CR0_TS_MASK
;
420 env
->hflags
|= HF_TS_MASK
;
421 env
->tr
.selector
= tss_selector
;
422 env
->tr
.base
= tss_base
;
423 env
->tr
.limit
= tss_limit
;
424 env
->tr
.flags
= e2
& ~DESC_TSS_BUSY_MASK
;
426 if ((type
& 8) && (env
->cr
[0] & CR0_PG_MASK
)) {
427 cpu_x86_update_cr3(env
, new_cr3
);
430 /* load all registers without an exception, then reload them with
431 possible exception */
433 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
|
434 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
;
436 eflags_mask
&= 0xffff;
437 load_eflags(new_eflags
, eflags_mask
);
438 /* XXX: what to do in 16 bit case ? */
447 if (new_eflags
& VM_MASK
) {
448 for(i
= 0; i
< 6; i
++)
449 load_seg_vm(i
, new_segs
[i
]);
450 /* in vm86, CPL is always 3 */
451 cpu_x86_set_cpl(env
, 3);
453 /* CPL is set the RPL of CS */
454 cpu_x86_set_cpl(env
, new_segs
[R_CS
] & 3);
455 /* first just selectors as the rest may trigger exceptions */
456 for(i
= 0; i
< 6; i
++)
457 cpu_x86_load_seg_cache(env
, i
, new_segs
[i
], 0, 0, 0);
460 env
->ldt
.selector
= new_ldt
& ~4;
467 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
469 if ((new_ldt
& 0xfffc) != 0) {
471 index
= new_ldt
& ~7;
472 if ((index
+ 7) > dt
->limit
)
473 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
474 ptr
= dt
->base
+ index
;
475 e1
= ldl_kernel(ptr
);
476 e2
= ldl_kernel(ptr
+ 4);
477 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
478 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
479 if (!(e2
& DESC_P_MASK
))
480 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
481 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
484 /* load the segments */
485 if (!(new_eflags
& VM_MASK
)) {
486 tss_load_seg(R_CS
, new_segs
[R_CS
]);
487 tss_load_seg(R_SS
, new_segs
[R_SS
]);
488 tss_load_seg(R_ES
, new_segs
[R_ES
]);
489 tss_load_seg(R_DS
, new_segs
[R_DS
]);
490 tss_load_seg(R_FS
, new_segs
[R_FS
]);
491 tss_load_seg(R_GS
, new_segs
[R_GS
]);
494 /* check that EIP is in the CS segment limits */
495 if (new_eip
> env
->segs
[R_CS
].limit
) {
496 /* XXX: different exception if CALL ? */
497 raise_exception_err(EXCP0D_GPF
, 0);
501 /* check if Port I/O is allowed in TSS */
502 static inline void check_io(int addr
, int size
)
504 int io_offset
, val
, mask
;
506 /* TSS must be a valid 32 bit one */
507 if (!(env
->tr
.flags
& DESC_P_MASK
) ||
508 ((env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf) != 9 ||
511 io_offset
= lduw_kernel(env
->tr
.base
+ 0x66);
512 io_offset
+= (addr
>> 3);
513 /* Note: the check needs two bytes */
514 if ((io_offset
+ 1) > env
->tr
.limit
)
516 val
= lduw_kernel(env
->tr
.base
+ io_offset
);
518 mask
= (1 << size
) - 1;
519 /* all bits must be zero to allow the I/O */
520 if ((val
& mask
) != 0) {
522 raise_exception_err(EXCP0D_GPF
, 0);
/* Check I/O permission for a byte access to port t0. */
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}
/* Check I/O permission for a word access to port t0. */
void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}
/* Check I/O permission for a dword access to port t0. */
void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
541 void helper_outb(uint32_t port
, uint32_t data
)
543 cpu_outb(env
, port
, data
& 0xff);
546 target_ulong
helper_inb(uint32_t port
)
548 return cpu_inb(env
, port
);
551 void helper_outw(uint32_t port
, uint32_t data
)
553 cpu_outw(env
, port
, data
& 0xffff);
556 target_ulong
helper_inw(uint32_t port
)
558 return cpu_inw(env
, port
);
561 void helper_outl(uint32_t port
, uint32_t data
)
563 cpu_outl(env
, port
, data
);
566 target_ulong
helper_inl(uint32_t port
)
568 return cpu_inl(env
, port
);
571 static inline unsigned int get_sp_mask(unsigned int e2
)
573 if (e2
& DESC_B_MASK
)
/* Store 'val' into ESP under 'sp_mask', preserving the bits outside the
   mask (16-bit stacks only update the low word).  The x86_64 variant also
   special-cases the full 64-bit stack pointer. */
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
/* XXX: add a is_user flag to have proper security support */
/* Stack push/pop primitives operating on a mutable local 'sp' under
   'sp_mask', addressing memory through the stack segment base 'ssp'. */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
618 /* protected mode interrupt */
619 static void do_interrupt_protected(int intno
, int is_int
, int error_code
,
620 unsigned int next_eip
, int is_hw
)
623 target_ulong ptr
, ssp
;
624 int type
, dpl
, selector
, ss_dpl
, cpl
;
625 int has_error_code
, new_stack
, shift
;
626 uint32_t e1
, e2
, offset
, ss
, esp
, ss_e1
, ss_e2
;
627 uint32_t old_eip
, sp_mask
;
630 if (!is_int
&& !is_hw
) {
649 if (intno
* 8 + 7 > dt
->limit
)
650 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
651 ptr
= dt
->base
+ intno
* 8;
652 e1
= ldl_kernel(ptr
);
653 e2
= ldl_kernel(ptr
+ 4);
654 /* check gate type */
655 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
657 case 5: /* task gate */
658 /* must do that check here to return the correct error code */
659 if (!(e2
& DESC_P_MASK
))
660 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
661 switch_tss(intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
662 if (has_error_code
) {
665 /* push the error code */
666 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
668 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
)
672 esp
= (ESP
- (2 << shift
)) & mask
;
673 ssp
= env
->segs
[R_SS
].base
+ esp
;
675 stl_kernel(ssp
, error_code
);
677 stw_kernel(ssp
, error_code
);
681 case 6: /* 286 interrupt gate */
682 case 7: /* 286 trap gate */
683 case 14: /* 386 interrupt gate */
684 case 15: /* 386 trap gate */
687 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
690 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
691 cpl
= env
->hflags
& HF_CPL_MASK
;
692 /* check privledge if software int */
693 if (is_int
&& dpl
< cpl
)
694 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
695 /* check valid bit */
696 if (!(e2
& DESC_P_MASK
))
697 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
699 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
700 if ((selector
& 0xfffc) == 0)
701 raise_exception_err(EXCP0D_GPF
, 0);
703 if (load_segment(&e1
, &e2
, selector
) != 0)
704 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
705 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
706 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
707 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
709 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
710 if (!(e2
& DESC_P_MASK
))
711 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
712 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
713 /* to inner privilege */
714 get_ss_esp_from_tss(&ss
, &esp
, dpl
);
715 if ((ss
& 0xfffc) == 0)
716 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
718 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
719 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
720 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
721 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
723 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
724 if (!(ss_e2
& DESC_S_MASK
) ||
725 (ss_e2
& DESC_CS_MASK
) ||
726 !(ss_e2
& DESC_W_MASK
))
727 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
728 if (!(ss_e2
& DESC_P_MASK
))
729 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
731 sp_mask
= get_sp_mask(ss_e2
);
732 ssp
= get_seg_base(ss_e1
, ss_e2
);
733 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
734 /* to same privilege */
735 if (env
->eflags
& VM_MASK
)
736 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
738 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
739 ssp
= env
->segs
[R_SS
].base
;
743 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
744 new_stack
= 0; /* avoid warning */
745 sp_mask
= 0; /* avoid warning */
746 ssp
= 0; /* avoid warning */
747 esp
= 0; /* avoid warning */
753 /* XXX: check that enough room is available */
754 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
755 if (env
->eflags
& VM_MASK
)
761 if (env
->eflags
& VM_MASK
) {
762 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
763 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
764 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
765 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
767 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
768 PUSHL(ssp
, esp
, sp_mask
, ESP
);
770 PUSHL(ssp
, esp
, sp_mask
, compute_eflags());
771 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
772 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
773 if (has_error_code
) {
774 PUSHL(ssp
, esp
, sp_mask
, error_code
);
778 if (env
->eflags
& VM_MASK
) {
779 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
780 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
781 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
782 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
784 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
785 PUSHW(ssp
, esp
, sp_mask
, ESP
);
787 PUSHW(ssp
, esp
, sp_mask
, compute_eflags());
788 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
789 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
790 if (has_error_code
) {
791 PUSHW(ssp
, esp
, sp_mask
, error_code
);
796 if (env
->eflags
& VM_MASK
) {
797 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
798 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
799 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
800 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
802 ss
= (ss
& ~3) | dpl
;
803 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
804 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
806 SET_ESP(esp
, sp_mask
);
808 selector
= (selector
& ~3) | dpl
;
809 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
810 get_seg_base(e1
, e2
),
811 get_seg_limit(e1
, e2
),
813 cpu_x86_set_cpl(env
, dpl
);
816 /* interrupt gate clear IF mask */
817 if ((type
& 1) == 0) {
818 env
->eflags
&= ~IF_MASK
;
820 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
/* 64-bit stack push/pop primitives (flat address space, no mask). */
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
837 static inline target_ulong
get_rsp_from_tss(int level
)
842 printf("TR: base=" TARGET_FMT_lx
" limit=%x\n",
843 env
->tr
.base
, env
->tr
.limit
);
846 if (!(env
->tr
.flags
& DESC_P_MASK
))
847 cpu_abort(env
, "invalid tss");
848 index
= 8 * level
+ 4;
849 if ((index
+ 7) > env
->tr
.limit
)
850 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
851 return ldq_kernel(env
->tr
.base
+ index
);
854 /* 64 bit interrupt */
855 static void do_interrupt64(int intno
, int is_int
, int error_code
,
856 target_ulong next_eip
, int is_hw
)
860 int type
, dpl
, selector
, cpl
, ist
;
861 int has_error_code
, new_stack
;
862 uint32_t e1
, e2
, e3
, ss
;
863 target_ulong old_eip
, esp
, offset
;
866 if (!is_int
&& !is_hw
) {
885 if (intno
* 16 + 15 > dt
->limit
)
886 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
887 ptr
= dt
->base
+ intno
* 16;
888 e1
= ldl_kernel(ptr
);
889 e2
= ldl_kernel(ptr
+ 4);
890 e3
= ldl_kernel(ptr
+ 8);
891 /* check gate type */
892 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
894 case 14: /* 386 interrupt gate */
895 case 15: /* 386 trap gate */
898 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
901 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
902 cpl
= env
->hflags
& HF_CPL_MASK
;
903 /* check privledge if software int */
904 if (is_int
&& dpl
< cpl
)
905 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
906 /* check valid bit */
907 if (!(e2
& DESC_P_MASK
))
908 raise_exception_err(EXCP0B_NOSEG
, intno
* 16 + 2);
910 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
912 if ((selector
& 0xfffc) == 0)
913 raise_exception_err(EXCP0D_GPF
, 0);
915 if (load_segment(&e1
, &e2
, selector
) != 0)
916 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
917 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
918 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
919 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
921 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
922 if (!(e2
& DESC_P_MASK
))
923 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
924 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
))
925 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
926 if ((!(e2
& DESC_C_MASK
) && dpl
< cpl
) || ist
!= 0) {
927 /* to inner privilege */
929 esp
= get_rsp_from_tss(ist
+ 3);
931 esp
= get_rsp_from_tss(dpl
);
932 esp
&= ~0xfLL
; /* align stack */
935 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
936 /* to same privilege */
937 if (env
->eflags
& VM_MASK
)
938 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
941 esp
= get_rsp_from_tss(ist
+ 3);
944 esp
&= ~0xfLL
; /* align stack */
947 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
948 new_stack
= 0; /* avoid warning */
949 esp
= 0; /* avoid warning */
952 PUSHQ(esp
, env
->segs
[R_SS
].selector
);
954 PUSHQ(esp
, compute_eflags());
955 PUSHQ(esp
, env
->segs
[R_CS
].selector
);
957 if (has_error_code
) {
958 PUSHQ(esp
, error_code
);
963 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
967 selector
= (selector
& ~3) | dpl
;
968 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
969 get_seg_base(e1
, e2
),
970 get_seg_limit(e1
, e2
),
972 cpu_x86_set_cpl(env
, dpl
);
975 /* interrupt gate clear IF mask */
976 if ((type
& 1) == 0) {
977 env
->eflags
&= ~IF_MASK
;
979 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
983 #if defined(CONFIG_USER_ONLY)
984 void helper_syscall(int next_eip_addend
)
986 env
->exception_index
= EXCP_SYSCALL
;
987 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
991 void helper_syscall(int next_eip_addend
)
995 if (!(env
->efer
& MSR_EFER_SCE
)) {
996 raise_exception_err(EXCP06_ILLOP
, 0);
998 selector
= (env
->star
>> 32) & 0xffff;
1000 if (env
->hflags
& HF_LMA_MASK
) {
1003 ECX
= env
->eip
+ next_eip_addend
;
1004 env
->regs
[11] = compute_eflags();
1006 code64
= env
->hflags
& HF_CS64_MASK
;
1008 cpu_x86_set_cpl(env
, 0);
1009 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1011 DESC_G_MASK
| DESC_P_MASK
|
1013 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
1014 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1016 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1018 DESC_W_MASK
| DESC_A_MASK
);
1019 env
->eflags
&= ~env
->fmask
;
1020 load_eflags(env
->eflags
, 0);
1022 env
->eip
= env
->lstar
;
1024 env
->eip
= env
->cstar
;
1028 ECX
= (uint32_t)(env
->eip
+ next_eip_addend
);
1030 cpu_x86_set_cpl(env
, 0);
1031 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1033 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1035 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1036 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1038 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1040 DESC_W_MASK
| DESC_A_MASK
);
1041 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
1042 env
->eip
= (uint32_t)env
->star
;
1047 void helper_sysret(int dflag
)
1051 if (!(env
->efer
& MSR_EFER_SCE
)) {
1052 raise_exception_err(EXCP06_ILLOP
, 0);
1054 cpl
= env
->hflags
& HF_CPL_MASK
;
1055 if (!(env
->cr
[0] & CR0_PE_MASK
) || cpl
!= 0) {
1056 raise_exception_err(EXCP0D_GPF
, 0);
1058 selector
= (env
->star
>> 48) & 0xffff;
1059 #ifdef TARGET_X86_64
1060 if (env
->hflags
& HF_LMA_MASK
) {
1062 cpu_x86_load_seg_cache(env
, R_CS
, (selector
+ 16) | 3,
1064 DESC_G_MASK
| DESC_P_MASK
|
1065 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1066 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1070 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1072 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1073 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1074 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1075 env
->eip
= (uint32_t)ECX
;
1077 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1079 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1080 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1081 DESC_W_MASK
| DESC_A_MASK
);
1082 load_eflags((uint32_t)(env
->regs
[11]), TF_MASK
| AC_MASK
| ID_MASK
|
1083 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
1084 cpu_x86_set_cpl(env
, 3);
1088 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1090 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1091 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1092 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1093 env
->eip
= (uint32_t)ECX
;
1094 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1096 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1097 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1098 DESC_W_MASK
| DESC_A_MASK
);
1099 env
->eflags
|= IF_MASK
;
1100 cpu_x86_set_cpl(env
, 3);
1103 if (kqemu_is_ok(env
)) {
1104 if (env
->hflags
& HF_LMA_MASK
)
1105 CC_OP
= CC_OP_EFLAGS
;
1106 env
->exception_index
= -1;
1112 /* real mode interrupt */
1113 static void do_interrupt_real(int intno
, int is_int
, int error_code
,
1114 unsigned int next_eip
)
1117 target_ulong ptr
, ssp
;
1119 uint32_t offset
, esp
;
1120 uint32_t old_cs
, old_eip
;
1122 /* real mode (simpler !) */
1124 if (intno
* 4 + 3 > dt
->limit
)
1125 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
1126 ptr
= dt
->base
+ intno
* 4;
1127 offset
= lduw_kernel(ptr
);
1128 selector
= lduw_kernel(ptr
+ 2);
1130 ssp
= env
->segs
[R_SS
].base
;
1135 old_cs
= env
->segs
[R_CS
].selector
;
1136 /* XXX: use SS segment size ? */
1137 PUSHW(ssp
, esp
, 0xffff, compute_eflags());
1138 PUSHW(ssp
, esp
, 0xffff, old_cs
);
1139 PUSHW(ssp
, esp
, 0xffff, old_eip
);
1141 /* update processor state */
1142 ESP
= (ESP
& ~0xffff) | (esp
& 0xffff);
1144 env
->segs
[R_CS
].selector
= selector
;
1145 env
->segs
[R_CS
].base
= (selector
<< 4);
1146 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
1149 /* fake user mode interrupt */
1150 void do_interrupt_user(int intno
, int is_int
, int error_code
,
1151 target_ulong next_eip
)
1155 int dpl
, cpl
, shift
;
1159 if (env
->hflags
& HF_LMA_MASK
) {
1164 ptr
= dt
->base
+ (intno
<< shift
);
1165 e2
= ldl_kernel(ptr
+ 4);
1167 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1168 cpl
= env
->hflags
& HF_CPL_MASK
;
1169 /* check privledge if software int */
1170 if (is_int
&& dpl
< cpl
)
1171 raise_exception_err(EXCP0D_GPF
, (intno
<< shift
) + 2);
1173 /* Since we emulate only user space, we cannot do more than
1174 exiting the emulation with the suitable exception and error
1181 * Begin execution of an interruption. is_int is TRUE if coming from
1182 * the int instruction. next_eip is the EIP value AFTER the interrupt
1183 * instruction. It is only relevant if is_int is TRUE.
1185 void do_interrupt(int intno
, int is_int
, int error_code
,
1186 target_ulong next_eip
, int is_hw
)
1188 if (loglevel
& CPU_LOG_INT
) {
1189 if ((env
->cr
[0] & CR0_PE_MASK
)) {
1191 fprintf(logfile
, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
" pc=" TARGET_FMT_lx
" SP=%04x:" TARGET_FMT_lx
,
1192 count
, intno
, error_code
, is_int
,
1193 env
->hflags
& HF_CPL_MASK
,
1194 env
->segs
[R_CS
].selector
, EIP
,
1195 (int)env
->segs
[R_CS
].base
+ EIP
,
1196 env
->segs
[R_SS
].selector
, ESP
);
1197 if (intno
== 0x0e) {
1198 fprintf(logfile
, " CR2=" TARGET_FMT_lx
, env
->cr
[2]);
1200 fprintf(logfile
, " EAX=" TARGET_FMT_lx
, EAX
);
1202 fprintf(logfile
, "\n");
1203 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
1208 fprintf(logfile
, " code=");
1209 ptr
= env
->segs
[R_CS
].base
+ env
->eip
;
1210 for(i
= 0; i
< 16; i
++) {
1211 fprintf(logfile
, " %02x", ldub(ptr
+ i
));
1213 fprintf(logfile
, "\n");
1219 if (env
->cr
[0] & CR0_PE_MASK
) {
1221 if (env
->hflags
& HF_LMA_MASK
) {
1222 do_interrupt64(intno
, is_int
, error_code
, next_eip
, is_hw
);
1226 do_interrupt_protected(intno
, is_int
, error_code
, next_eip
, is_hw
);
1229 do_interrupt_real(intno
, is_int
, error_code
, next_eip
);
1234 * Check nested exceptions and change to double or triple fault if
1235 * needed. It should only be called, if this is not an interrupt.
1236 * Returns the new exception number.
1238 static int check_exception(int intno
, int *error_code
)
1240 int first_contributory
= env
->old_exception
== 0 ||
1241 (env
->old_exception
>= 10 &&
1242 env
->old_exception
<= 13);
1243 int second_contributory
= intno
== 0 ||
1244 (intno
>= 10 && intno
<= 13);
1246 if (loglevel
& CPU_LOG_INT
)
1247 fprintf(logfile
, "check_exception old: 0x%x new 0x%x\n",
1248 env
->old_exception
, intno
);
1250 if (env
->old_exception
== EXCP08_DBLE
)
1251 cpu_abort(env
, "triple fault");
1253 if ((first_contributory
&& second_contributory
)
1254 || (env
->old_exception
== EXCP0E_PAGE
&&
1255 (second_contributory
|| (intno
== EXCP0E_PAGE
)))) {
1256 intno
= EXCP08_DBLE
;
1260 if (second_contributory
|| (intno
== EXCP0E_PAGE
) ||
1261 (intno
== EXCP08_DBLE
))
1262 env
->old_exception
= intno
;
1268 * Signal an interruption. It is executed in the main CPU loop.
1269 * is_int is TRUE if coming from the int instruction. next_eip is the
1270 * EIP value AFTER the interrupt instruction. It is only relevant if
1273 void raise_interrupt(int intno
, int is_int
, int error_code
,
1274 int next_eip_addend
)
1277 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE
+ intno
, error_code
);
1278 intno
= check_exception(intno
, &error_code
);
1280 helper_svm_check_intercept_param(SVM_EXIT_SWINT
, 0);
1283 env
->exception_index
= intno
;
1284 env
->error_code
= error_code
;
1285 env
->exception_is_int
= is_int
;
1286 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
1290 /* shortcuts to generate exceptions */
1292 void (raise_exception_err
)(int exception_index
, int error_code
)
1294 raise_interrupt(exception_index
, 0, error_code
, 0);
1297 void raise_exception(int exception_index
)
1299 raise_interrupt(exception_index
, 0, 0, 0);
1304 #if defined(CONFIG_USER_ONLY)
1306 void do_smm_enter(void)
1310 void helper_rsm(void)
1316 #ifdef TARGET_X86_64
1317 #define SMM_REVISION_ID 0x00020064
1319 #define SMM_REVISION_ID 0x00020000
1322 void do_smm_enter(void)
1324 target_ulong sm_state
;
1328 if (loglevel
& CPU_LOG_INT
) {
1329 fprintf(logfile
, "SMM: enter\n");
1330 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
1333 env
->hflags
|= HF_SMM_MASK
;
1334 cpu_smm_update(env
);
1336 sm_state
= env
->smbase
+ 0x8000;
1338 #ifdef TARGET_X86_64
1339 for(i
= 0; i
< 6; i
++) {
1341 offset
= 0x7e00 + i
* 16;
1342 stw_phys(sm_state
+ offset
, dt
->selector
);
1343 stw_phys(sm_state
+ offset
+ 2, (dt
->flags
>> 8) & 0xf0ff);
1344 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1345 stq_phys(sm_state
+ offset
+ 8, dt
->base
);
1348 stq_phys(sm_state
+ 0x7e68, env
->gdt
.base
);
1349 stl_phys(sm_state
+ 0x7e64, env
->gdt
.limit
);
1351 stw_phys(sm_state
+ 0x7e70, env
->ldt
.selector
);
1352 stq_phys(sm_state
+ 0x7e78, env
->ldt
.base
);
1353 stl_phys(sm_state
+ 0x7e74, env
->ldt
.limit
);
1354 stw_phys(sm_state
+ 0x7e72, (env
->ldt
.flags
>> 8) & 0xf0ff);
1356 stq_phys(sm_state
+ 0x7e88, env
->idt
.base
);
1357 stl_phys(sm_state
+ 0x7e84, env
->idt
.limit
);
1359 stw_phys(sm_state
+ 0x7e90, env
->tr
.selector
);
1360 stq_phys(sm_state
+ 0x7e98, env
->tr
.base
);
1361 stl_phys(sm_state
+ 0x7e94, env
->tr
.limit
);
1362 stw_phys(sm_state
+ 0x7e92, (env
->tr
.flags
>> 8) & 0xf0ff);
1364 stq_phys(sm_state
+ 0x7ed0, env
->efer
);
1366 stq_phys(sm_state
+ 0x7ff8, EAX
);
1367 stq_phys(sm_state
+ 0x7ff0, ECX
);
1368 stq_phys(sm_state
+ 0x7fe8, EDX
);
1369 stq_phys(sm_state
+ 0x7fe0, EBX
);
1370 stq_phys(sm_state
+ 0x7fd8, ESP
);
1371 stq_phys(sm_state
+ 0x7fd0, EBP
);
1372 stq_phys(sm_state
+ 0x7fc8, ESI
);
1373 stq_phys(sm_state
+ 0x7fc0, EDI
);
1374 for(i
= 8; i
< 16; i
++)
1375 stq_phys(sm_state
+ 0x7ff8 - i
* 8, env
->regs
[i
]);
1376 stq_phys(sm_state
+ 0x7f78, env
->eip
);
1377 stl_phys(sm_state
+ 0x7f70, compute_eflags());
1378 stl_phys(sm_state
+ 0x7f68, env
->dr
[6]);
1379 stl_phys(sm_state
+ 0x7f60, env
->dr
[7]);
1381 stl_phys(sm_state
+ 0x7f48, env
->cr
[4]);
1382 stl_phys(sm_state
+ 0x7f50, env
->cr
[3]);
1383 stl_phys(sm_state
+ 0x7f58, env
->cr
[0]);
1385 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1386 stl_phys(sm_state
+ 0x7f00, env
->smbase
);
1388 stl_phys(sm_state
+ 0x7ffc, env
->cr
[0]);
1389 stl_phys(sm_state
+ 0x7ff8, env
->cr
[3]);
1390 stl_phys(sm_state
+ 0x7ff4, compute_eflags());
1391 stl_phys(sm_state
+ 0x7ff0, env
->eip
);
1392 stl_phys(sm_state
+ 0x7fec, EDI
);
1393 stl_phys(sm_state
+ 0x7fe8, ESI
);
1394 stl_phys(sm_state
+ 0x7fe4, EBP
);
1395 stl_phys(sm_state
+ 0x7fe0, ESP
);
1396 stl_phys(sm_state
+ 0x7fdc, EBX
);
1397 stl_phys(sm_state
+ 0x7fd8, EDX
);
1398 stl_phys(sm_state
+ 0x7fd4, ECX
);
1399 stl_phys(sm_state
+ 0x7fd0, EAX
);
1400 stl_phys(sm_state
+ 0x7fcc, env
->dr
[6]);
1401 stl_phys(sm_state
+ 0x7fc8, env
->dr
[7]);
1403 stl_phys(sm_state
+ 0x7fc4, env
->tr
.selector
);
1404 stl_phys(sm_state
+ 0x7f64, env
->tr
.base
);
1405 stl_phys(sm_state
+ 0x7f60, env
->tr
.limit
);
1406 stl_phys(sm_state
+ 0x7f5c, (env
->tr
.flags
>> 8) & 0xf0ff);
1408 stl_phys(sm_state
+ 0x7fc0, env
->ldt
.selector
);
1409 stl_phys(sm_state
+ 0x7f80, env
->ldt
.base
);
1410 stl_phys(sm_state
+ 0x7f7c, env
->ldt
.limit
);
1411 stl_phys(sm_state
+ 0x7f78, (env
->ldt
.flags
>> 8) & 0xf0ff);
1413 stl_phys(sm_state
+ 0x7f74, env
->gdt
.base
);
1414 stl_phys(sm_state
+ 0x7f70, env
->gdt
.limit
);
1416 stl_phys(sm_state
+ 0x7f58, env
->idt
.base
);
1417 stl_phys(sm_state
+ 0x7f54, env
->idt
.limit
);
1419 for(i
= 0; i
< 6; i
++) {
1422 offset
= 0x7f84 + i
* 12;
1424 offset
= 0x7f2c + (i
- 3) * 12;
1425 stl_phys(sm_state
+ 0x7fa8 + i
* 4, dt
->selector
);
1426 stl_phys(sm_state
+ offset
+ 8, dt
->base
);
1427 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1428 stl_phys(sm_state
+ offset
, (dt
->flags
>> 8) & 0xf0ff);
1430 stl_phys(sm_state
+ 0x7f14, env
->cr
[4]);
1432 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1433 stl_phys(sm_state
+ 0x7ef8, env
->smbase
);
1435 /* init SMM cpu state */
1437 #ifdef TARGET_X86_64
1439 env
->hflags
&= ~HF_LMA_MASK
;
1441 load_eflags(0, ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1442 env
->eip
= 0x00008000;
1443 cpu_x86_load_seg_cache(env
, R_CS
, (env
->smbase
>> 4) & 0xffff, env
->smbase
,
1445 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0xffffffff, 0);
1446 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0xffffffff, 0);
1447 cpu_x86_load_seg_cache(env
, R_SS
, 0, 0, 0xffffffff, 0);
1448 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0xffffffff, 0);
1449 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0xffffffff, 0);
1451 cpu_x86_update_cr0(env
,
1452 env
->cr
[0] & ~(CR0_PE_MASK
| CR0_EM_MASK
| CR0_TS_MASK
| CR0_PG_MASK
));
1453 cpu_x86_update_cr4(env
, 0);
1454 env
->dr
[7] = 0x00000400;
1455 CC_OP
= CC_OP_EFLAGS
;
1458 void helper_rsm(void)
1460 target_ulong sm_state
;
1464 sm_state
= env
->smbase
+ 0x8000;
1465 #ifdef TARGET_X86_64
1466 env
->efer
= ldq_phys(sm_state
+ 0x7ed0);
1467 if (env
->efer
& MSR_EFER_LMA
)
1468 env
->hflags
|= HF_LMA_MASK
;
1470 env
->hflags
&= ~HF_LMA_MASK
;
1472 for(i
= 0; i
< 6; i
++) {
1473 offset
= 0x7e00 + i
* 16;
1474 cpu_x86_load_seg_cache(env
, i
,
1475 lduw_phys(sm_state
+ offset
),
1476 ldq_phys(sm_state
+ offset
+ 8),
1477 ldl_phys(sm_state
+ offset
+ 4),
1478 (lduw_phys(sm_state
+ offset
+ 2) & 0xf0ff) << 8);
1481 env
->gdt
.base
= ldq_phys(sm_state
+ 0x7e68);
1482 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7e64);
1484 env
->ldt
.selector
= lduw_phys(sm_state
+ 0x7e70);
1485 env
->ldt
.base
= ldq_phys(sm_state
+ 0x7e78);
1486 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7e74);
1487 env
->ldt
.flags
= (lduw_phys(sm_state
+ 0x7e72) & 0xf0ff) << 8;
1489 env
->idt
.base
= ldq_phys(sm_state
+ 0x7e88);
1490 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7e84);
1492 env
->tr
.selector
= lduw_phys(sm_state
+ 0x7e90);
1493 env
->tr
.base
= ldq_phys(sm_state
+ 0x7e98);
1494 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7e94);
1495 env
->tr
.flags
= (lduw_phys(sm_state
+ 0x7e92) & 0xf0ff) << 8;
1497 EAX
= ldq_phys(sm_state
+ 0x7ff8);
1498 ECX
= ldq_phys(sm_state
+ 0x7ff0);
1499 EDX
= ldq_phys(sm_state
+ 0x7fe8);
1500 EBX
= ldq_phys(sm_state
+ 0x7fe0);
1501 ESP
= ldq_phys(sm_state
+ 0x7fd8);
1502 EBP
= ldq_phys(sm_state
+ 0x7fd0);
1503 ESI
= ldq_phys(sm_state
+ 0x7fc8);
1504 EDI
= ldq_phys(sm_state
+ 0x7fc0);
1505 for(i
= 8; i
< 16; i
++)
1506 env
->regs
[i
] = ldq_phys(sm_state
+ 0x7ff8 - i
* 8);
1507 env
->eip
= ldq_phys(sm_state
+ 0x7f78);
1508 load_eflags(ldl_phys(sm_state
+ 0x7f70),
1509 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1510 env
->dr
[6] = ldl_phys(sm_state
+ 0x7f68);
1511 env
->dr
[7] = ldl_phys(sm_state
+ 0x7f60);
1513 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f48));
1514 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7f50));
1515 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7f58));
1517 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1518 if (val
& 0x20000) {
1519 env
->smbase
= ldl_phys(sm_state
+ 0x7f00) & ~0x7fff;
1522 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7ffc));
1523 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7ff8));
1524 load_eflags(ldl_phys(sm_state
+ 0x7ff4),
1525 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1526 env
->eip
= ldl_phys(sm_state
+ 0x7ff0);
1527 EDI
= ldl_phys(sm_state
+ 0x7fec);
1528 ESI
= ldl_phys(sm_state
+ 0x7fe8);
1529 EBP
= ldl_phys(sm_state
+ 0x7fe4);
1530 ESP
= ldl_phys(sm_state
+ 0x7fe0);
1531 EBX
= ldl_phys(sm_state
+ 0x7fdc);
1532 EDX
= ldl_phys(sm_state
+ 0x7fd8);
1533 ECX
= ldl_phys(sm_state
+ 0x7fd4);
1534 EAX
= ldl_phys(sm_state
+ 0x7fd0);
1535 env
->dr
[6] = ldl_phys(sm_state
+ 0x7fcc);
1536 env
->dr
[7] = ldl_phys(sm_state
+ 0x7fc8);
1538 env
->tr
.selector
= ldl_phys(sm_state
+ 0x7fc4) & 0xffff;
1539 env
->tr
.base
= ldl_phys(sm_state
+ 0x7f64);
1540 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7f60);
1541 env
->tr
.flags
= (ldl_phys(sm_state
+ 0x7f5c) & 0xf0ff) << 8;
1543 env
->ldt
.selector
= ldl_phys(sm_state
+ 0x7fc0) & 0xffff;
1544 env
->ldt
.base
= ldl_phys(sm_state
+ 0x7f80);
1545 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7f7c);
1546 env
->ldt
.flags
= (ldl_phys(sm_state
+ 0x7f78) & 0xf0ff) << 8;
1548 env
->gdt
.base
= ldl_phys(sm_state
+ 0x7f74);
1549 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7f70);
1551 env
->idt
.base
= ldl_phys(sm_state
+ 0x7f58);
1552 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7f54);
1554 for(i
= 0; i
< 6; i
++) {
1556 offset
= 0x7f84 + i
* 12;
1558 offset
= 0x7f2c + (i
- 3) * 12;
1559 cpu_x86_load_seg_cache(env
, i
,
1560 ldl_phys(sm_state
+ 0x7fa8 + i
* 4) & 0xffff,
1561 ldl_phys(sm_state
+ offset
+ 8),
1562 ldl_phys(sm_state
+ offset
+ 4),
1563 (ldl_phys(sm_state
+ offset
) & 0xf0ff) << 8);
1565 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f14));
1567 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1568 if (val
& 0x20000) {
1569 env
->smbase
= ldl_phys(sm_state
+ 0x7ef8) & ~0x7fff;
1572 CC_OP
= CC_OP_EFLAGS
;
1573 env
->hflags
&= ~HF_SMM_MASK
;
1574 cpu_smm_update(env
);
1576 if (loglevel
& CPU_LOG_INT
) {
1577 fprintf(logfile
, "SMM: after RSM\n");
1578 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
1582 #endif /* !CONFIG_USER_ONLY */
1585 /* division, flags are undefined */
1587 void helper_divb_AL(target_ulong t0
)
1589 unsigned int num
, den
, q
, r
;
1591 num
= (EAX
& 0xffff);
1594 raise_exception(EXCP00_DIVZ
);
1598 raise_exception(EXCP00_DIVZ
);
1600 r
= (num
% den
) & 0xff;
1601 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
1604 void helper_idivb_AL(target_ulong t0
)
1611 raise_exception(EXCP00_DIVZ
);
1615 raise_exception(EXCP00_DIVZ
);
1617 r
= (num
% den
) & 0xff;
1618 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
1621 void helper_divw_AX(target_ulong t0
)
1623 unsigned int num
, den
, q
, r
;
1625 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1626 den
= (t0
& 0xffff);
1628 raise_exception(EXCP00_DIVZ
);
1632 raise_exception(EXCP00_DIVZ
);
1634 r
= (num
% den
) & 0xffff;
1635 EAX
= (EAX
& ~0xffff) | q
;
1636 EDX
= (EDX
& ~0xffff) | r
;
1639 void helper_idivw_AX(target_ulong t0
)
1643 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1646 raise_exception(EXCP00_DIVZ
);
1649 if (q
!= (int16_t)q
)
1650 raise_exception(EXCP00_DIVZ
);
1652 r
= (num
% den
) & 0xffff;
1653 EAX
= (EAX
& ~0xffff) | q
;
1654 EDX
= (EDX
& ~0xffff) | r
;
1657 void helper_divl_EAX(target_ulong t0
)
1659 unsigned int den
, r
;
1662 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1665 raise_exception(EXCP00_DIVZ
);
1670 raise_exception(EXCP00_DIVZ
);
1675 void helper_idivl_EAX(target_ulong t0
)
1680 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1683 raise_exception(EXCP00_DIVZ
);
1687 if (q
!= (int32_t)q
)
1688 raise_exception(EXCP00_DIVZ
);
1695 /* XXX: exception */
1696 void helper_aam(int base
)
1702 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1706 void helper_aad(int base
)
1710 ah
= (EAX
>> 8) & 0xff;
1711 al
= ((ah
* base
) + al
) & 0xff;
1712 EAX
= (EAX
& ~0xffff) | al
;
1716 void helper_aaa(void)
1722 eflags
= cc_table
[CC_OP
].compute_all();
1725 ah
= (EAX
>> 8) & 0xff;
1727 icarry
= (al
> 0xf9);
1728 if (((al
& 0x0f) > 9 ) || af
) {
1729 al
= (al
+ 6) & 0x0f;
1730 ah
= (ah
+ 1 + icarry
) & 0xff;
1731 eflags
|= CC_C
| CC_A
;
1733 eflags
&= ~(CC_C
| CC_A
);
1736 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1741 void helper_aas(void)
1747 eflags
= cc_table
[CC_OP
].compute_all();
1750 ah
= (EAX
>> 8) & 0xff;
1753 if (((al
& 0x0f) > 9 ) || af
) {
1754 al
= (al
- 6) & 0x0f;
1755 ah
= (ah
- 1 - icarry
) & 0xff;
1756 eflags
|= CC_C
| CC_A
;
1758 eflags
&= ~(CC_C
| CC_A
);
1761 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1766 void helper_daa(void)
1771 eflags
= cc_table
[CC_OP
].compute_all();
1777 if (((al
& 0x0f) > 9 ) || af
) {
1778 al
= (al
+ 6) & 0xff;
1781 if ((al
> 0x9f) || cf
) {
1782 al
= (al
+ 0x60) & 0xff;
1785 EAX
= (EAX
& ~0xff) | al
;
1786 /* well, speed is not an issue here, so we compute the flags by hand */
1787 eflags
|= (al
== 0) << 6; /* zf */
1788 eflags
|= parity_table
[al
]; /* pf */
1789 eflags
|= (al
& 0x80); /* sf */
1794 void helper_das(void)
1796 int al
, al1
, af
, cf
;
1799 eflags
= cc_table
[CC_OP
].compute_all();
1806 if (((al
& 0x0f) > 9 ) || af
) {
1810 al
= (al
- 6) & 0xff;
1812 if ((al1
> 0x99) || cf
) {
1813 al
= (al
- 0x60) & 0xff;
1816 EAX
= (EAX
& ~0xff) | al
;
1817 /* well, speed is not an issue here, so we compute the flags by hand */
1818 eflags
|= (al
== 0) << 6; /* zf */
1819 eflags
|= parity_table
[al
]; /* pf */
1820 eflags
|= (al
& 0x80); /* sf */
1825 void helper_into(int next_eip_addend
)
1828 eflags
= cc_table
[CC_OP
].compute_all();
1829 if (eflags
& CC_O
) {
1830 raise_interrupt(EXCP04_INTO
, 1, 0, next_eip_addend
);
1834 void helper_cmpxchg8b(target_ulong a0
)
1839 eflags
= cc_table
[CC_OP
].compute_all();
1841 if (d
== (((uint64_t)EDX
<< 32) | (uint32_t)EAX
)) {
1842 stq(a0
, ((uint64_t)ECX
<< 32) | (uint32_t)EBX
);
1845 EDX
= (uint32_t)(d
>> 32);
1852 #ifdef TARGET_X86_64
1853 void helper_cmpxchg16b(target_ulong a0
)
1858 eflags
= cc_table
[CC_OP
].compute_all();
1861 if (d0
== EAX
&& d1
== EDX
) {
1874 void helper_single_step(void)
1876 env
->dr
[6] |= 0x4000;
1877 raise_exception(EXCP01_SSTP
);
1880 void helper_cpuid(void)
1884 helper_svm_check_intercept_param(SVM_EXIT_CPUID
, 0);
1886 index
= (uint32_t)EAX
;
1887 /* test if maximum index reached */
1888 if (index
& 0x80000000) {
1889 if (index
> env
->cpuid_xlevel
)
1890 index
= env
->cpuid_level
;
1892 if (index
> env
->cpuid_level
)
1893 index
= env
->cpuid_level
;
1898 EAX
= env
->cpuid_level
;
1899 EBX
= env
->cpuid_vendor1
;
1900 EDX
= env
->cpuid_vendor2
;
1901 ECX
= env
->cpuid_vendor3
;
1904 EAX
= env
->cpuid_version
;
1905 EBX
= (env
->cpuid_apic_id
<< 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1906 ECX
= env
->cpuid_ext_features
;
1907 EDX
= env
->cpuid_features
;
1910 /* cache info: needed for Pentium Pro compatibility */
1917 EAX
= env
->cpuid_xlevel
;
1918 EBX
= env
->cpuid_vendor1
;
1919 EDX
= env
->cpuid_vendor2
;
1920 ECX
= env
->cpuid_vendor3
;
1923 EAX
= env
->cpuid_features
;
1925 ECX
= env
->cpuid_ext3_features
;
1926 EDX
= env
->cpuid_ext2_features
;
1931 EAX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 0];
1932 EBX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 1];
1933 ECX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 2];
1934 EDX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 3];
1937 /* cache info (L1 cache) */
1944 /* cache info (L2 cache) */
1951 /* virtual & phys address size in low 2 bytes. */
1952 /* XXX: This value must match the one used in the MMU code. */
1953 #if defined(TARGET_X86_64)
1954 # if defined(USE_KQEMU)
1955 EAX
= 0x00003020; /* 48 bits virtual, 32 bits physical */
1957 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1958 EAX
= 0x00003028; /* 48 bits virtual, 40 bits physical */
1961 # if defined(USE_KQEMU)
1962 EAX
= 0x00000020; /* 32 bits physical */
1964 EAX
= 0x00000024; /* 36 bits physical */
1978 /* reserved values: zero */
1987 void helper_enter_level(int level
, int data32
, target_ulong t1
)
1990 uint32_t esp_mask
, esp
, ebp
;
1992 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1993 ssp
= env
->segs
[R_SS
].base
;
2002 stl(ssp
+ (esp
& esp_mask
), ldl(ssp
+ (ebp
& esp_mask
)));
2005 stl(ssp
+ (esp
& esp_mask
), t1
);
2012 stw(ssp
+ (esp
& esp_mask
), lduw(ssp
+ (ebp
& esp_mask
)));
2015 stw(ssp
+ (esp
& esp_mask
), t1
);
2019 #ifdef TARGET_X86_64
2020 void helper_enter64_level(int level
, int data64
, target_ulong t1
)
2022 target_ulong esp
, ebp
;
2042 stw(esp
, lduw(ebp
));
2050 void helper_lldt(int selector
)
2054 int index
, entry_limit
;
2058 if ((selector
& 0xfffc) == 0) {
2059 /* XXX: NULL selector case: invalid LDT */
2064 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2066 index
= selector
& ~7;
2067 #ifdef TARGET_X86_64
2068 if (env
->hflags
& HF_LMA_MASK
)
2073 if ((index
+ entry_limit
) > dt
->limit
)
2074 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2075 ptr
= dt
->base
+ index
;
2076 e1
= ldl_kernel(ptr
);
2077 e2
= ldl_kernel(ptr
+ 4);
2078 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
2079 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2080 if (!(e2
& DESC_P_MASK
))
2081 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2082 #ifdef TARGET_X86_64
2083 if (env
->hflags
& HF_LMA_MASK
) {
2085 e3
= ldl_kernel(ptr
+ 8);
2086 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2087 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
2091 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2094 env
->ldt
.selector
= selector
;
2097 void helper_ltr(int selector
)
2101 int index
, type
, entry_limit
;
2105 if ((selector
& 0xfffc) == 0) {
2106 /* NULL selector case: invalid TR */
2112 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2114 index
= selector
& ~7;
2115 #ifdef TARGET_X86_64
2116 if (env
->hflags
& HF_LMA_MASK
)
2121 if ((index
+ entry_limit
) > dt
->limit
)
2122 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2123 ptr
= dt
->base
+ index
;
2124 e1
= ldl_kernel(ptr
);
2125 e2
= ldl_kernel(ptr
+ 4);
2126 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2127 if ((e2
& DESC_S_MASK
) ||
2128 (type
!= 1 && type
!= 9))
2129 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2130 if (!(e2
& DESC_P_MASK
))
2131 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2132 #ifdef TARGET_X86_64
2133 if (env
->hflags
& HF_LMA_MASK
) {
2135 e3
= ldl_kernel(ptr
+ 8);
2136 e4
= ldl_kernel(ptr
+ 12);
2137 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf)
2138 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2139 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2140 env
->tr
.base
|= (target_ulong
)e3
<< 32;
2144 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2146 e2
|= DESC_TSS_BUSY_MASK
;
2147 stl_kernel(ptr
+ 4, e2
);
2149 env
->tr
.selector
= selector
;
2152 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2153 void helper_load_seg(int seg_reg
, int selector
)
2162 cpl
= env
->hflags
& HF_CPL_MASK
;
2163 if ((selector
& 0xfffc) == 0) {
2164 /* null selector case */
2166 #ifdef TARGET_X86_64
2167 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
2170 raise_exception_err(EXCP0D_GPF
, 0);
2171 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
2178 index
= selector
& ~7;
2179 if ((index
+ 7) > dt
->limit
)
2180 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2181 ptr
= dt
->base
+ index
;
2182 e1
= ldl_kernel(ptr
);
2183 e2
= ldl_kernel(ptr
+ 4);
2185 if (!(e2
& DESC_S_MASK
))
2186 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2188 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2189 if (seg_reg
== R_SS
) {
2190 /* must be writable segment */
2191 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
2192 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2193 if (rpl
!= cpl
|| dpl
!= cpl
)
2194 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2196 /* must be readable segment */
2197 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
)
2198 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2200 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2201 /* if not conforming code, test rights */
2202 if (dpl
< cpl
|| dpl
< rpl
)
2203 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2207 if (!(e2
& DESC_P_MASK
)) {
2208 if (seg_reg
== R_SS
)
2209 raise_exception_err(EXCP0C_STACK
, selector
& 0xfffc);
2211 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2214 /* set the access bit if not already set */
2215 if (!(e2
& DESC_A_MASK
)) {
2217 stl_kernel(ptr
+ 4, e2
);
2220 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
2221 get_seg_base(e1
, e2
),
2222 get_seg_limit(e1
, e2
),
2225 fprintf(logfile
, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2226 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
2231 /* protected mode jump */
2232 void helper_ljmp_protected(int new_cs
, target_ulong new_eip
,
2233 int next_eip_addend
)
2236 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
2237 target_ulong next_eip
;
2239 if ((new_cs
& 0xfffc) == 0)
2240 raise_exception_err(EXCP0D_GPF
, 0);
2241 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2242 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2243 cpl
= env
->hflags
& HF_CPL_MASK
;
2244 if (e2
& DESC_S_MASK
) {
2245 if (!(e2
& DESC_CS_MASK
))
2246 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2247 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2248 if (e2
& DESC_C_MASK
) {
2249 /* conforming code segment */
2251 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2253 /* non conforming code segment */
2256 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2258 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2260 if (!(e2
& DESC_P_MASK
))
2261 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2262 limit
= get_seg_limit(e1
, e2
);
2263 if (new_eip
> limit
&&
2264 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
))
2265 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2266 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2267 get_seg_base(e1
, e2
), limit
, e2
);
2270 /* jump to call or task gate */
2271 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2273 cpl
= env
->hflags
& HF_CPL_MASK
;
2274 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2276 case 1: /* 286 TSS */
2277 case 9: /* 386 TSS */
2278 case 5: /* task gate */
2279 if (dpl
< cpl
|| dpl
< rpl
)
2280 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2281 next_eip
= env
->eip
+ next_eip_addend
;
2282 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
);
2283 CC_OP
= CC_OP_EFLAGS
;
2285 case 4: /* 286 call gate */
2286 case 12: /* 386 call gate */
2287 if ((dpl
< cpl
) || (dpl
< rpl
))
2288 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2289 if (!(e2
& DESC_P_MASK
))
2290 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2292 new_eip
= (e1
& 0xffff);
2294 new_eip
|= (e2
& 0xffff0000);
2295 if (load_segment(&e1
, &e2
, gate_cs
) != 0)
2296 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2297 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2298 /* must be code segment */
2299 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
2300 (DESC_S_MASK
| DESC_CS_MASK
)))
2301 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2302 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
2303 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
)))
2304 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2305 if (!(e2
& DESC_P_MASK
))
2306 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2307 limit
= get_seg_limit(e1
, e2
);
2308 if (new_eip
> limit
)
2309 raise_exception_err(EXCP0D_GPF
, 0);
2310 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
2311 get_seg_base(e1
, e2
), limit
, e2
);
2315 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2321 /* real mode call */
2322 void helper_lcall_real(int new_cs
, target_ulong new_eip1
,
2323 int shift
, int next_eip
)
2326 uint32_t esp
, esp_mask
;
2331 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2332 ssp
= env
->segs
[R_SS
].base
;
2334 PUSHL(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2335 PUSHL(ssp
, esp
, esp_mask
, next_eip
);
2337 PUSHW(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2338 PUSHW(ssp
, esp
, esp_mask
, next_eip
);
2341 SET_ESP(esp
, esp_mask
);
2343 env
->segs
[R_CS
].selector
= new_cs
;
2344 env
->segs
[R_CS
].base
= (new_cs
<< 4);
2347 /* protected mode call */
2348 void helper_lcall_protected(int new_cs
, target_ulong new_eip
,
2349 int shift
, int next_eip_addend
)
2352 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
2353 uint32_t ss
, ss_e1
, ss_e2
, sp
, type
, ss_dpl
, sp_mask
;
2354 uint32_t val
, limit
, old_sp_mask
;
2355 target_ulong ssp
, old_ssp
, next_eip
;
2357 next_eip
= env
->eip
+ next_eip_addend
;
2359 if (loglevel
& CPU_LOG_PCALL
) {
2360 fprintf(logfile
, "lcall %04x:%08x s=%d\n",
2361 new_cs
, (uint32_t)new_eip
, shift
);
2362 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
2365 if ((new_cs
& 0xfffc) == 0)
2366 raise_exception_err(EXCP0D_GPF
, 0);
2367 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2368 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2369 cpl
= env
->hflags
& HF_CPL_MASK
;
2371 if (loglevel
& CPU_LOG_PCALL
) {
2372 fprintf(logfile
, "desc=%08x:%08x\n", e1
, e2
);
2375 if (e2
& DESC_S_MASK
) {
2376 if (!(e2
& DESC_CS_MASK
))
2377 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2378 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2379 if (e2
& DESC_C_MASK
) {
2380 /* conforming code segment */
2382 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2384 /* non conforming code segment */
2387 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2389 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2391 if (!(e2
& DESC_P_MASK
))
2392 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2394 #ifdef TARGET_X86_64
2395 /* XXX: check 16/32 bit cases in long mode */
2400 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
2401 PUSHQ(rsp
, next_eip
);
2402 /* from this point, not restartable */
2404 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2405 get_seg_base(e1
, e2
),
2406 get_seg_limit(e1
, e2
), e2
);
2412 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2413 ssp
= env
->segs
[R_SS
].base
;
2415 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2416 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2418 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2419 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2422 limit
= get_seg_limit(e1
, e2
);
2423 if (new_eip
> limit
)
2424 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2425 /* from this point, not restartable */
2426 SET_ESP(sp
, sp_mask
);
2427 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2428 get_seg_base(e1
, e2
), limit
, e2
);
2432 /* check gate type */
2433 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
2434 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2437 case 1: /* available 286 TSS */
2438 case 9: /* available 386 TSS */
2439 case 5: /* task gate */
2440 if (dpl
< cpl
|| dpl
< rpl
)
2441 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2442 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
2443 CC_OP
= CC_OP_EFLAGS
;
2445 case 4: /* 286 call gate */
2446 case 12: /* 386 call gate */
2449 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2454 if (dpl
< cpl
|| dpl
< rpl
)
2455 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2456 /* check valid bit */
2457 if (!(e2
& DESC_P_MASK
))
2458 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2459 selector
= e1
>> 16;
2460 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
2461 param_count
= e2
& 0x1f;
2462 if ((selector
& 0xfffc) == 0)
2463 raise_exception_err(EXCP0D_GPF
, 0);
2465 if (load_segment(&e1
, &e2
, selector
) != 0)
2466 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2467 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
2468 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2469 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2471 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2472 if (!(e2
& DESC_P_MASK
))
2473 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2475 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
2476 /* to inner privilege */
2477 get_ss_esp_from_tss(&ss
, &sp
, dpl
);
2479 if (loglevel
& CPU_LOG_PCALL
)
2480 fprintf(logfile
, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
"\n",
2481 ss
, sp
, param_count
, ESP
);
2483 if ((ss
& 0xfffc) == 0)
2484 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2485 if ((ss
& 3) != dpl
)
2486 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2487 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
2488 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2489 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2491 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2492 if (!(ss_e2
& DESC_S_MASK
) ||
2493 (ss_e2
& DESC_CS_MASK
) ||
2494 !(ss_e2
& DESC_W_MASK
))
2495 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2496 if (!(ss_e2
& DESC_P_MASK
))
2497 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2499 // push_size = ((param_count * 2) + 8) << shift;
2501 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2502 old_ssp
= env
->segs
[R_SS
].base
;
2504 sp_mask
= get_sp_mask(ss_e2
);
2505 ssp
= get_seg_base(ss_e1
, ss_e2
);
2507 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2508 PUSHL(ssp
, sp
, sp_mask
, ESP
);
2509 for(i
= param_count
- 1; i
>= 0; i
--) {
2510 val
= ldl_kernel(old_ssp
+ ((ESP
+ i
* 4) & old_sp_mask
));
2511 PUSHL(ssp
, sp
, sp_mask
, val
);
2514 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2515 PUSHW(ssp
, sp
, sp_mask
, ESP
);
2516 for(i
= param_count
- 1; i
>= 0; i
--) {
2517 val
= lduw_kernel(old_ssp
+ ((ESP
+ i
* 2) & old_sp_mask
));
2518 PUSHW(ssp
, sp
, sp_mask
, val
);
2523 /* to same privilege */
2525 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2526 ssp
= env
->segs
[R_SS
].base
;
2527 // push_size = (4 << shift);
2532 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2533 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2535 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2536 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2539 /* from this point, not restartable */
2542 ss
= (ss
& ~3) | dpl
;
2543 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
2545 get_seg_limit(ss_e1
, ss_e2
),
2549 selector
= (selector
& ~3) | dpl
;
2550 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
2551 get_seg_base(e1
, e2
),
2552 get_seg_limit(e1
, e2
),
2554 cpu_x86_set_cpl(env
, dpl
);
2555 SET_ESP(sp
, sp_mask
);
2559 if (kqemu_is_ok(env
)) {
2560 env
->exception_index
= -1;
2566 /* real and vm86 mode iret */
2567 void helper_iret_real(int shift
)
2569 uint32_t sp
, new_cs
, new_eip
, new_eflags
, sp_mask
;
2573 sp_mask
= 0xffff; /* XXXX: use SS segment size ? */
2575 ssp
= env
->segs
[R_SS
].base
;
2578 POPL(ssp
, sp
, sp_mask
, new_eip
);
2579 POPL(ssp
, sp
, sp_mask
, new_cs
);
2581 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2584 POPW(ssp
, sp
, sp_mask
, new_eip
);
2585 POPW(ssp
, sp
, sp_mask
, new_cs
);
2586 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2588 ESP
= (ESP
& ~sp_mask
) | (sp
& sp_mask
);
2589 load_seg_vm(R_CS
, new_cs
);
2591 if (env
->eflags
& VM_MASK
)
2592 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| RF_MASK
| NT_MASK
;
2594 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| IOPL_MASK
| RF_MASK
| NT_MASK
;
2596 eflags_mask
&= 0xffff;
2597 load_eflags(new_eflags
, eflags_mask
);
2598 env
->hflags
&= ~HF_NMI_MASK
;
2601 static inline void validate_seg(int seg_reg
, int cpl
)
2606 /* XXX: on x86_64, we do not want to nullify FS and GS because
2607 they may still contain a valid base. I would be interested to
2608 know how a real x86_64 CPU behaves */
2609 if ((seg_reg
== R_FS
|| seg_reg
== R_GS
) &&
2610 (env
->segs
[seg_reg
].selector
& 0xfffc) == 0)
2613 e2
= env
->segs
[seg_reg
].flags
;
2614 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2615 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2616 /* data or non conforming code segment */
2618 cpu_x86_load_seg_cache(env
, seg_reg
, 0, 0, 0, 0);
2623 /* protected mode iret */
2624 static inline void helper_ret_protected(int shift
, int is_iret
, int addend
)
2626 uint32_t new_cs
, new_eflags
, new_ss
;
2627 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2628 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2629 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2630 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2632 #ifdef TARGET_X86_64
2637 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2639 ssp
= env
->segs
[R_SS
].base
;
2640 new_eflags
= 0; /* avoid warning */
2641 #ifdef TARGET_X86_64
2647 POPQ(sp
, new_eflags
);
2653 POPL(ssp
, sp
, sp_mask
, new_eip
);
2654 POPL(ssp
, sp
, sp_mask
, new_cs
);
2657 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2658 if (new_eflags
& VM_MASK
)
2659 goto return_to_vm86
;
2663 POPW(ssp
, sp
, sp_mask
, new_eip
);
2664 POPW(ssp
, sp
, sp_mask
, new_cs
);
2666 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2669 if (loglevel
& CPU_LOG_PCALL
) {
2670 fprintf(logfile
, "lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2671 new_cs
, new_eip
, shift
, addend
);
2672 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
2675 if ((new_cs
& 0xfffc) == 0)
2676 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2677 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2678 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2679 if (!(e2
& DESC_S_MASK
) ||
2680 !(e2
& DESC_CS_MASK
))
2681 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2682 cpl
= env
->hflags
& HF_CPL_MASK
;
2685 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2686 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2687 if (e2
& DESC_C_MASK
) {
2689 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2692 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2694 if (!(e2
& DESC_P_MASK
))
2695 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2698 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2699 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2700 /* return to same priledge level */
2701 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2702 get_seg_base(e1
, e2
),
2703 get_seg_limit(e1
, e2
),
2706 /* return to different privilege level */
2707 #ifdef TARGET_X86_64
2716 POPL(ssp
, sp
, sp_mask
, new_esp
);
2717 POPL(ssp
, sp
, sp_mask
, new_ss
);
2721 POPW(ssp
, sp
, sp_mask
, new_esp
);
2722 POPW(ssp
, sp
, sp_mask
, new_ss
);
2725 if (loglevel
& CPU_LOG_PCALL
) {
2726 fprintf(logfile
, "new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2730 if ((new_ss
& 0xfffc) == 0) {
2731 #ifdef TARGET_X86_64
2732 /* NULL ss is allowed in long mode if cpl != 3*/
2733 /* XXX: test CS64 ? */
2734 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2735 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2737 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2738 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2739 DESC_W_MASK
| DESC_A_MASK
);
2740 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed ? */
2744 raise_exception_err(EXCP0D_GPF
, 0);
2747 if ((new_ss
& 3) != rpl
)
2748 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2749 if (load_segment(&ss_e1
, &ss_e2
, new_ss
) != 0)
2750 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2751 if (!(ss_e2
& DESC_S_MASK
) ||
2752 (ss_e2
& DESC_CS_MASK
) ||
2753 !(ss_e2
& DESC_W_MASK
))
2754 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2755 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2757 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2758 if (!(ss_e2
& DESC_P_MASK
))
2759 raise_exception_err(EXCP0B_NOSEG
, new_ss
& 0xfffc);
2760 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2761 get_seg_base(ss_e1
, ss_e2
),
2762 get_seg_limit(ss_e1
, ss_e2
),
2766 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2767 get_seg_base(e1
, e2
),
2768 get_seg_limit(e1
, e2
),
2770 cpu_x86_set_cpl(env
, rpl
);
2772 #ifdef TARGET_X86_64
2773 if (env
->hflags
& HF_CS64_MASK
)
2777 sp_mask
= get_sp_mask(ss_e2
);
2779 /* validate data segments */
2780 validate_seg(R_ES
, rpl
);
2781 validate_seg(R_DS
, rpl
);
2782 validate_seg(R_FS
, rpl
);
2783 validate_seg(R_GS
, rpl
);
2787 SET_ESP(sp
, sp_mask
);
2790 /* NOTE: 'cpl' is the _old_ CPL */
2791 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2793 eflags_mask
|= IOPL_MASK
;
2794 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2796 eflags_mask
|= IF_MASK
;
2798 eflags_mask
&= 0xffff;
2799 load_eflags(new_eflags
, eflags_mask
);
2804 POPL(ssp
, sp
, sp_mask
, new_esp
);
2805 POPL(ssp
, sp
, sp_mask
, new_ss
);
2806 POPL(ssp
, sp
, sp_mask
, new_es
);
2807 POPL(ssp
, sp
, sp_mask
, new_ds
);
2808 POPL(ssp
, sp
, sp_mask
, new_fs
);
2809 POPL(ssp
, sp
, sp_mask
, new_gs
);
2811 /* modify processor state */
2812 load_eflags(new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2813 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
| VIP_MASK
);
2814 load_seg_vm(R_CS
, new_cs
& 0xffff);
2815 cpu_x86_set_cpl(env
, 3);
2816 load_seg_vm(R_SS
, new_ss
& 0xffff);
2817 load_seg_vm(R_ES
, new_es
& 0xffff);
2818 load_seg_vm(R_DS
, new_ds
& 0xffff);
2819 load_seg_vm(R_FS
, new_fs
& 0xffff);
2820 load_seg_vm(R_GS
, new_gs
& 0xffff);
2822 env
->eip
= new_eip
& 0xffff;
2826 void helper_iret_protected(int shift
, int next_eip
)
2828 int tss_selector
, type
;
2831 /* specific case for TSS */
2832 if (env
->eflags
& NT_MASK
) {
2833 #ifdef TARGET_X86_64
2834 if (env
->hflags
& HF_LMA_MASK
)
2835 raise_exception_err(EXCP0D_GPF
, 0);
2837 tss_selector
= lduw_kernel(env
->tr
.base
+ 0);
2838 if (tss_selector
& 4)
2839 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2840 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
2841 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2842 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2843 /* NOTE: we check both segment and busy TSS */
2845 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2846 switch_tss(tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
);
2848 helper_ret_protected(shift
, 1, 0);
2850 env
->hflags
&= ~HF_NMI_MASK
;
2852 if (kqemu_is_ok(env
)) {
2853 CC_OP
= CC_OP_EFLAGS
;
2854 env
->exception_index
= -1;
2860 void helper_lret_protected(int shift
, int addend
)
2862 helper_ret_protected(shift
, 0, addend
);
2864 if (kqemu_is_ok(env
)) {
2865 env
->exception_index
= -1;
2871 void helper_sysenter(void)
2873 if (env
->sysenter_cs
== 0) {
2874 raise_exception_err(EXCP0D_GPF
, 0);
2876 env
->eflags
&= ~(VM_MASK
| IF_MASK
| RF_MASK
);
2877 cpu_x86_set_cpl(env
, 0);
2878 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2880 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2882 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2883 cpu_x86_load_seg_cache(env
, R_SS
, (env
->sysenter_cs
+ 8) & 0xfffc,
2885 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2887 DESC_W_MASK
| DESC_A_MASK
);
2888 ESP
= env
->sysenter_esp
;
2889 EIP
= env
->sysenter_eip
;
2892 void helper_sysexit(void)
2896 cpl
= env
->hflags
& HF_CPL_MASK
;
2897 if (env
->sysenter_cs
== 0 || cpl
!= 0) {
2898 raise_exception_err(EXCP0D_GPF
, 0);
2900 cpu_x86_set_cpl(env
, 3);
2901 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 16) & 0xfffc) | 3,
2903 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2904 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2905 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2906 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 24) & 0xfffc) | 3,
2908 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2909 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2910 DESC_W_MASK
| DESC_A_MASK
);
2914 if (kqemu_is_ok(env
)) {
2915 env
->exception_index
= -1;
2921 #if defined(CONFIG_USER_ONLY)
2922 target_ulong
helper_read_crN(int reg
)
2927 void helper_write_crN(int reg
, target_ulong t0
)
2931 target_ulong
helper_read_crN(int reg
)
2935 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0
+ reg
, 0);
2941 val
= cpu_get_apic_tpr(env
);
2947 void helper_write_crN(int reg
, target_ulong t0
)
2949 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0
+ reg
, 0);
2952 cpu_x86_update_cr0(env
, t0
);
2955 cpu_x86_update_cr3(env
, t0
);
2958 cpu_x86_update_cr4(env
, t0
);
2961 cpu_set_apic_tpr(env
, t0
);
2971 void helper_lmsw(target_ulong t0
)
2973 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2974 if already set to one. */
2975 t0
= (env
->cr
[0] & ~0xe) | (t0
& 0xf);
2976 helper_write_crN(0, t0
);
2979 void helper_clts(void)
2981 env
->cr
[0] &= ~CR0_TS_MASK
;
2982 env
->hflags
&= ~HF_TS_MASK
;
2985 #if !defined(CONFIG_USER_ONLY)
2986 target_ulong
helper_movtl_T0_cr8(void)
2988 return cpu_get_apic_tpr(env
);
2993 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2998 void helper_invlpg(target_ulong addr
)
3000 helper_svm_check_intercept_param(SVM_EXIT_INVLPG
, 0);
3001 cpu_x86_flush_tlb(env
, addr
);
3004 void helper_rdtsc(void)
3008 if ((env
->cr
[4] & CR4_TSD_MASK
) && ((env
->hflags
& HF_CPL_MASK
) != 0)) {
3009 raise_exception(EXCP0D_GPF
);
3011 helper_svm_check_intercept_param(SVM_EXIT_RDTSC
, 0);
3013 val
= cpu_get_tsc(env
);
3014 EAX
= (uint32_t)(val
);
3015 EDX
= (uint32_t)(val
>> 32);
3018 void helper_rdpmc(void)
3020 if ((env
->cr
[4] & CR4_PCE_MASK
) && ((env
->hflags
& HF_CPL_MASK
) != 0)) {
3021 raise_exception(EXCP0D_GPF
);
3023 helper_svm_check_intercept_param(SVM_EXIT_RDPMC
, 0);
3025 /* currently unimplemented */
3026 raise_exception_err(EXCP06_ILLOP
, 0);
3029 #if defined(CONFIG_USER_ONLY)
3030 void helper_wrmsr(void)
3034 void helper_rdmsr(void)
3038 void helper_wrmsr(void)
3042 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 1);
3044 val
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
3046 switch((uint32_t)ECX
) {
3047 case MSR_IA32_SYSENTER_CS
:
3048 env
->sysenter_cs
= val
& 0xffff;
3050 case MSR_IA32_SYSENTER_ESP
:
3051 env
->sysenter_esp
= val
;
3053 case MSR_IA32_SYSENTER_EIP
:
3054 env
->sysenter_eip
= val
;
3056 case MSR_IA32_APICBASE
:
3057 cpu_set_apic_base(env
, val
);
3061 uint64_t update_mask
;
3063 if (env
->cpuid_ext2_features
& CPUID_EXT2_SYSCALL
)
3064 update_mask
|= MSR_EFER_SCE
;
3065 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
)
3066 update_mask
|= MSR_EFER_LME
;
3067 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3068 update_mask
|= MSR_EFER_FFXSR
;
3069 if (env
->cpuid_ext2_features
& CPUID_EXT2_NX
)
3070 update_mask
|= MSR_EFER_NXE
;
3071 env
->efer
= (env
->efer
& ~update_mask
) |
3072 (val
& update_mask
);
3081 case MSR_VM_HSAVE_PA
:
3082 env
->vm_hsave
= val
;
3084 #ifdef TARGET_X86_64
3095 env
->segs
[R_FS
].base
= val
;
3098 env
->segs
[R_GS
].base
= val
;
3100 case MSR_KERNELGSBASE
:
3101 env
->kernelgsbase
= val
;
3105 /* XXX: exception ? */
3110 void helper_rdmsr(void)
3114 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 0);
3116 switch((uint32_t)ECX
) {
3117 case MSR_IA32_SYSENTER_CS
:
3118 val
= env
->sysenter_cs
;
3120 case MSR_IA32_SYSENTER_ESP
:
3121 val
= env
->sysenter_esp
;
3123 case MSR_IA32_SYSENTER_EIP
:
3124 val
= env
->sysenter_eip
;
3126 case MSR_IA32_APICBASE
:
3127 val
= cpu_get_apic_base(env
);
3138 case MSR_VM_HSAVE_PA
:
3139 val
= env
->vm_hsave
;
3141 #ifdef TARGET_X86_64
3152 val
= env
->segs
[R_FS
].base
;
3155 val
= env
->segs
[R_GS
].base
;
3157 case MSR_KERNELGSBASE
:
3158 val
= env
->kernelgsbase
;
3162 /* XXX: exception ? */
3166 EAX
= (uint32_t)(val
);
3167 EDX
= (uint32_t)(val
>> 32);
3171 target_ulong
helper_lsl(target_ulong selector1
)
3174 uint32_t e1
, e2
, eflags
, selector
;
3175 int rpl
, dpl
, cpl
, type
;
3177 selector
= selector1
& 0xffff;
3178 eflags
= cc_table
[CC_OP
].compute_all();
3179 if (load_segment(&e1
, &e2
, selector
) != 0)
3182 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3183 cpl
= env
->hflags
& HF_CPL_MASK
;
3184 if (e2
& DESC_S_MASK
) {
3185 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3188 if (dpl
< cpl
|| dpl
< rpl
)
3192 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3203 if (dpl
< cpl
|| dpl
< rpl
) {
3205 CC_SRC
= eflags
& ~CC_Z
;
3209 limit
= get_seg_limit(e1
, e2
);
3210 CC_SRC
= eflags
| CC_Z
;
3214 target_ulong
helper_lar(target_ulong selector1
)
3216 uint32_t e1
, e2
, eflags
, selector
;
3217 int rpl
, dpl
, cpl
, type
;
3219 selector
= selector1
& 0xffff;
3220 eflags
= cc_table
[CC_OP
].compute_all();
3221 if ((selector
& 0xfffc) == 0)
3223 if (load_segment(&e1
, &e2
, selector
) != 0)
3226 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3227 cpl
= env
->hflags
& HF_CPL_MASK
;
3228 if (e2
& DESC_S_MASK
) {
3229 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3232 if (dpl
< cpl
|| dpl
< rpl
)
3236 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3250 if (dpl
< cpl
|| dpl
< rpl
) {
3252 CC_SRC
= eflags
& ~CC_Z
;
3256 CC_SRC
= eflags
| CC_Z
;
3257 return e2
& 0x00f0ff00;
3260 void helper_verr(target_ulong selector1
)
3262 uint32_t e1
, e2
, eflags
, selector
;
3265 selector
= selector1
& 0xffff;
3266 eflags
= cc_table
[CC_OP
].compute_all();
3267 if ((selector
& 0xfffc) == 0)
3269 if (load_segment(&e1
, &e2
, selector
) != 0)
3271 if (!(e2
& DESC_S_MASK
))
3274 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3275 cpl
= env
->hflags
& HF_CPL_MASK
;
3276 if (e2
& DESC_CS_MASK
) {
3277 if (!(e2
& DESC_R_MASK
))
3279 if (!(e2
& DESC_C_MASK
)) {
3280 if (dpl
< cpl
|| dpl
< rpl
)
3284 if (dpl
< cpl
|| dpl
< rpl
) {
3286 CC_SRC
= eflags
& ~CC_Z
;
3290 CC_SRC
= eflags
| CC_Z
;
3293 void helper_verw(target_ulong selector1
)
3295 uint32_t e1
, e2
, eflags
, selector
;
3298 selector
= selector1
& 0xffff;
3299 eflags
= cc_table
[CC_OP
].compute_all();
3300 if ((selector
& 0xfffc) == 0)
3302 if (load_segment(&e1
, &e2
, selector
) != 0)
3304 if (!(e2
& DESC_S_MASK
))
3307 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3308 cpl
= env
->hflags
& HF_CPL_MASK
;
3309 if (e2
& DESC_CS_MASK
) {
3312 if (dpl
< cpl
|| dpl
< rpl
)
3314 if (!(e2
& DESC_W_MASK
)) {
3316 CC_SRC
= eflags
& ~CC_Z
;
3320 CC_SRC
= eflags
| CC_Z
;
3323 /* x87 FPU helpers */
3325 static void fpu_set_exception(int mask
)
3328 if (env
->fpus
& (~env
->fpuc
& FPUC_EM
))
3329 env
->fpus
|= FPUS_SE
| FPUS_B
;
3332 static inline CPU86_LDouble
helper_fdiv(CPU86_LDouble a
, CPU86_LDouble b
)
3335 fpu_set_exception(FPUS_ZE
);
3339 void fpu_raise_exception(void)
3341 if (env
->cr
[0] & CR0_NE_MASK
) {
3342 raise_exception(EXCP10_COPR
);
3344 #if !defined(CONFIG_USER_ONLY)
3351 void helper_flds_FT0(uint32_t val
)
3358 FT0
= float32_to_floatx(u
.f
, &env
->fp_status
);
3361 void helper_fldl_FT0(uint64_t val
)
3368 FT0
= float64_to_floatx(u
.f
, &env
->fp_status
);
3371 void helper_fildl_FT0(int32_t val
)
3373 FT0
= int32_to_floatx(val
, &env
->fp_status
);
3376 void helper_flds_ST0(uint32_t val
)
3383 new_fpstt
= (env
->fpstt
- 1) & 7;
3385 env
->fpregs
[new_fpstt
].d
= float32_to_floatx(u
.f
, &env
->fp_status
);
3386 env
->fpstt
= new_fpstt
;
3387 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3390 void helper_fldl_ST0(uint64_t val
)
3397 new_fpstt
= (env
->fpstt
- 1) & 7;
3399 env
->fpregs
[new_fpstt
].d
= float64_to_floatx(u
.f
, &env
->fp_status
);
3400 env
->fpstt
= new_fpstt
;
3401 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3404 void helper_fildl_ST0(int32_t val
)
3407 new_fpstt
= (env
->fpstt
- 1) & 7;
3408 env
->fpregs
[new_fpstt
].d
= int32_to_floatx(val
, &env
->fp_status
);
3409 env
->fpstt
= new_fpstt
;
3410 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3413 void helper_fildll_ST0(int64_t val
)
3416 new_fpstt
= (env
->fpstt
- 1) & 7;
3417 env
->fpregs
[new_fpstt
].d
= int64_to_floatx(val
, &env
->fp_status
);
3418 env
->fpstt
= new_fpstt
;
3419 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3422 uint32_t helper_fsts_ST0(void)
3428 u
.f
= floatx_to_float32(ST0
, &env
->fp_status
);
3432 uint64_t helper_fstl_ST0(void)
3438 u
.f
= floatx_to_float64(ST0
, &env
->fp_status
);
3442 int32_t helper_fist_ST0(void)
3445 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3446 if (val
!= (int16_t)val
)
3451 int32_t helper_fistl_ST0(void)
3454 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3458 int64_t helper_fistll_ST0(void)
3461 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3465 int32_t helper_fistt_ST0(void)
3468 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3469 if (val
!= (int16_t)val
)
3474 int32_t helper_fisttl_ST0(void)
3477 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3481 int64_t helper_fisttll_ST0(void)
3484 val
= floatx_to_int64_round_to_zero(ST0
, &env
->fp_status
);
3488 void helper_fldt_ST0(target_ulong ptr
)
3491 new_fpstt
= (env
->fpstt
- 1) & 7;
3492 env
->fpregs
[new_fpstt
].d
= helper_fldt(ptr
);
3493 env
->fpstt
= new_fpstt
;
3494 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3497 void helper_fstt_ST0(target_ulong ptr
)
3499 helper_fstt(ST0
, ptr
);
3502 void helper_fpush(void)
3507 void helper_fpop(void)
3512 void helper_fdecstp(void)
3514 env
->fpstt
= (env
->fpstt
- 1) & 7;
3515 env
->fpus
&= (~0x4700);
3518 void helper_fincstp(void)
3520 env
->fpstt
= (env
->fpstt
+ 1) & 7;
3521 env
->fpus
&= (~0x4700);
3526 void helper_ffree_STN(int st_index
)
3528 env
->fptags
[(env
->fpstt
+ st_index
) & 7] = 1;
3531 void helper_fmov_ST0_FT0(void)
3536 void helper_fmov_FT0_STN(int st_index
)
3541 void helper_fmov_ST0_STN(int st_index
)
3546 void helper_fmov_STN_ST0(int st_index
)
3551 void helper_fxchg_ST0_STN(int st_index
)
3559 /* FPU operations */
3561 static const int fcom_ccval
[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3563 void helper_fcom_ST0_FT0(void)
3567 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3568 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3572 void helper_fucom_ST0_FT0(void)
3576 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3577 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3581 static const int fcomi_ccval
[4] = {CC_C
, CC_Z
, 0, CC_Z
| CC_P
| CC_C
};
3583 void helper_fcomi_ST0_FT0(void)
3588 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3589 eflags
= cc_table
[CC_OP
].compute_all();
3590 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3595 void helper_fucomi_ST0_FT0(void)
3600 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3601 eflags
= cc_table
[CC_OP
].compute_all();
3602 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3607 void helper_fadd_ST0_FT0(void)
3612 void helper_fmul_ST0_FT0(void)
3617 void helper_fsub_ST0_FT0(void)
3622 void helper_fsubr_ST0_FT0(void)
3627 void helper_fdiv_ST0_FT0(void)
3629 ST0
= helper_fdiv(ST0
, FT0
);
3632 void helper_fdivr_ST0_FT0(void)
3634 ST0
= helper_fdiv(FT0
, ST0
);
3637 /* fp operations between STN and ST0 */
3639 void helper_fadd_STN_ST0(int st_index
)
3641 ST(st_index
) += ST0
;
3644 void helper_fmul_STN_ST0(int st_index
)
3646 ST(st_index
) *= ST0
;
3649 void helper_fsub_STN_ST0(int st_index
)
3651 ST(st_index
) -= ST0
;
3654 void helper_fsubr_STN_ST0(int st_index
)
3661 void helper_fdiv_STN_ST0(int st_index
)
3665 *p
= helper_fdiv(*p
, ST0
);
3668 void helper_fdivr_STN_ST0(int st_index
)
3672 *p
= helper_fdiv(ST0
, *p
);
3675 /* misc FPU operations */
3676 void helper_fchs_ST0(void)
3678 ST0
= floatx_chs(ST0
);
3681 void helper_fabs_ST0(void)
3683 ST0
= floatx_abs(ST0
);
3686 void helper_fld1_ST0(void)
3691 void helper_fldl2t_ST0(void)
3696 void helper_fldl2e_ST0(void)
3701 void helper_fldpi_ST0(void)
3706 void helper_fldlg2_ST0(void)
3711 void helper_fldln2_ST0(void)
3716 void helper_fldz_ST0(void)
3721 void helper_fldz_FT0(void)
3726 uint32_t helper_fnstsw(void)
3728 return (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
3731 uint32_t helper_fnstcw(void)
3736 static void update_fp_status(void)
3740 /* set rounding mode */
3741 switch(env
->fpuc
& RC_MASK
) {
3744 rnd_type
= float_round_nearest_even
;
3747 rnd_type
= float_round_down
;
3750 rnd_type
= float_round_up
;
3753 rnd_type
= float_round_to_zero
;
3756 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
3758 switch((env
->fpuc
>> 8) & 3) {
3770 set_floatx80_rounding_precision(rnd_type
, &env
->fp_status
);
3774 void helper_fldcw(uint32_t val
)
3780 void helper_fclex(void)
3782 env
->fpus
&= 0x7f00;
3785 void helper_fwait(void)
3787 if (env
->fpus
& FPUS_SE
)
3788 fpu_raise_exception();
3792 void helper_fninit(void)
3809 void helper_fbld_ST0(target_ulong ptr
)
3817 for(i
= 8; i
>= 0; i
--) {
3819 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
3822 if (ldub(ptr
+ 9) & 0x80)
3828 void helper_fbst_ST0(target_ulong ptr
)
3831 target_ulong mem_ref
, mem_end
;
3834 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3836 mem_end
= mem_ref
+ 9;
3843 while (mem_ref
< mem_end
) {
3848 v
= ((v
/ 10) << 4) | (v
% 10);
3851 while (mem_ref
< mem_end
) {
3856 void helper_f2xm1(void)
3858 ST0
= pow(2.0,ST0
) - 1.0;
3861 void helper_fyl2x(void)
3863 CPU86_LDouble fptemp
;
3867 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
3871 env
->fpus
&= (~0x4700);
3876 void helper_fptan(void)
3878 CPU86_LDouble fptemp
;
3881 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3887 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3888 /* the above code is for |arg| < 2**52 only */
3892 void helper_fpatan(void)
3894 CPU86_LDouble fptemp
, fpsrcop
;
3898 ST1
= atan2(fpsrcop
,fptemp
);
3902 void helper_fxtract(void)
3904 CPU86_LDoubleU temp
;
3905 unsigned int expdif
;
3908 expdif
= EXPD(temp
) - EXPBIAS
;
3909 /*DP exponent bias*/
3916 void helper_fprem1(void)
3918 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
3919 CPU86_LDoubleU fpsrcop1
, fptemp1
;
3921 signed long long int q
;
3923 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
3924 ST0
= 0.0 / 0.0; /* NaN */
3925 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3931 fpsrcop1
.d
= fpsrcop
;
3933 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
3936 /* optimisation? taken from the AMD docs */
3937 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3938 /* ST0 is unchanged */
3943 dblq
= fpsrcop
/ fptemp
;
3944 /* round dblq towards nearest integer */
3946 ST0
= fpsrcop
- fptemp
* dblq
;
3948 /* convert dblq to q by truncating towards zero */
3950 q
= (signed long long int)(-dblq
);
3952 q
= (signed long long int)dblq
;
3954 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3955 /* (C0,C3,C1) <-- (q2,q1,q0) */
3956 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
3957 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
3958 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
3960 env
->fpus
|= 0x400; /* C2 <-- 1 */
3961 fptemp
= pow(2.0, expdif
- 50);
3962 fpsrcop
= (ST0
/ ST1
) / fptemp
;
3963 /* fpsrcop = integer obtained by chopping */
3964 fpsrcop
= (fpsrcop
< 0.0) ?
3965 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
3966 ST0
-= (ST1
* fpsrcop
* fptemp
);
3970 void helper_fprem(void)
3972 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
3973 CPU86_LDoubleU fpsrcop1
, fptemp1
;
3975 signed long long int q
;
3977 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
3978 ST0
= 0.0 / 0.0; /* NaN */
3979 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3983 fpsrcop
= (CPU86_LDouble
)ST0
;
3984 fptemp
= (CPU86_LDouble
)ST1
;
3985 fpsrcop1
.d
= fpsrcop
;
3987 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
3990 /* optimisation? taken from the AMD docs */
3991 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3992 /* ST0 is unchanged */
3996 if ( expdif
< 53 ) {
3997 dblq
= fpsrcop
/*ST0*/ / fptemp
/*ST1*/;
3998 /* round dblq towards zero */
3999 dblq
= (dblq
< 0.0) ? ceil(dblq
) : floor(dblq
);
4000 ST0
= fpsrcop
/*ST0*/ - fptemp
* dblq
;
4002 /* convert dblq to q by truncating towards zero */
4004 q
= (signed long long int)(-dblq
);
4006 q
= (signed long long int)dblq
;
4008 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4009 /* (C0,C3,C1) <-- (q2,q1,q0) */
4010 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4011 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4012 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4014 int N
= 32 + (expdif
% 32); /* as per AMD docs */
4015 env
->fpus
|= 0x400; /* C2 <-- 1 */
4016 fptemp
= pow(2.0, (double)(expdif
- N
));
4017 fpsrcop
= (ST0
/ ST1
) / fptemp
;
4018 /* fpsrcop = integer obtained by chopping */
4019 fpsrcop
= (fpsrcop
< 0.0) ?
4020 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4021 ST0
-= (ST1
* fpsrcop
* fptemp
);
4025 void helper_fyl2xp1(void)
4027 CPU86_LDouble fptemp
;
4030 if ((fptemp
+1.0)>0.0) {
4031 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
4035 env
->fpus
&= (~0x4700);
4040 void helper_fsqrt(void)
4042 CPU86_LDouble fptemp
;
4046 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4052 void helper_fsincos(void)
4054 CPU86_LDouble fptemp
;
4057 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4063 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4064 /* the above code is for |arg| < 2**63 only */
4068 void helper_frndint(void)
4070 ST0
= floatx_round_to_int(ST0
, &env
->fp_status
);
4073 void helper_fscale(void)
4075 ST0
= ldexp (ST0
, (int)(ST1
));
4078 void helper_fsin(void)
4080 CPU86_LDouble fptemp
;
4083 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4087 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4088 /* the above code is for |arg| < 2**53 only */
4092 void helper_fcos(void)
4094 CPU86_LDouble fptemp
;
4097 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4101 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4102 /* the above code is for |arg5 < 2**63 only */
4106 void helper_fxam_ST0(void)
4108 CPU86_LDoubleU temp
;
4113 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4115 env
->fpus
|= 0x200; /* C1 <-- 1 */
4117 /* XXX: test fptags too */
4118 expdif
= EXPD(temp
);
4119 if (expdif
== MAXEXPD
) {
4120 #ifdef USE_X86LDOUBLE
4121 if (MANTD(temp
) == 0x8000000000000000ULL
)
4123 if (MANTD(temp
) == 0)
4125 env
->fpus
|= 0x500 /*Infinity*/;
4127 env
->fpus
|= 0x100 /*NaN*/;
4128 } else if (expdif
== 0) {
4129 if (MANTD(temp
) == 0)
4130 env
->fpus
|= 0x4000 /*Zero*/;
4132 env
->fpus
|= 0x4400 /*Denormal*/;
4138 void helper_fstenv(target_ulong ptr
, int data32
)
4140 int fpus
, fptag
, exp
, i
;
4144 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4146 for (i
=7; i
>=0; i
--) {
4148 if (env
->fptags
[i
]) {
4151 tmp
.d
= env
->fpregs
[i
].d
;
4154 if (exp
== 0 && mant
== 0) {
4157 } else if (exp
== 0 || exp
== MAXEXPD
4158 #ifdef USE_X86LDOUBLE
4159 || (mant
& (1LL << 63)) == 0
4162 /* NaNs, infinity, denormal */
4169 stl(ptr
, env
->fpuc
);
4171 stl(ptr
+ 8, fptag
);
4172 stl(ptr
+ 12, 0); /* fpip */
4173 stl(ptr
+ 16, 0); /* fpcs */
4174 stl(ptr
+ 20, 0); /* fpoo */
4175 stl(ptr
+ 24, 0); /* fpos */
4178 stw(ptr
, env
->fpuc
);
4180 stw(ptr
+ 4, fptag
);
4188 void helper_fldenv(target_ulong ptr
, int data32
)
4193 env
->fpuc
= lduw(ptr
);
4194 fpus
= lduw(ptr
+ 4);
4195 fptag
= lduw(ptr
+ 8);
4198 env
->fpuc
= lduw(ptr
);
4199 fpus
= lduw(ptr
+ 2);
4200 fptag
= lduw(ptr
+ 4);
4202 env
->fpstt
= (fpus
>> 11) & 7;
4203 env
->fpus
= fpus
& ~0x3800;
4204 for(i
= 0;i
< 8; i
++) {
4205 env
->fptags
[i
] = ((fptag
& 3) == 3);
4210 void helper_fsave(target_ulong ptr
, int data32
)
4215 helper_fstenv(ptr
, data32
);
4217 ptr
+= (14 << data32
);
4218 for(i
= 0;i
< 8; i
++) {
4220 helper_fstt(tmp
, ptr
);
4238 void helper_frstor(target_ulong ptr
, int data32
)
4243 helper_fldenv(ptr
, data32
);
4244 ptr
+= (14 << data32
);
4246 for(i
= 0;i
< 8; i
++) {
4247 tmp
= helper_fldt(ptr
);
4253 void helper_fxsave(target_ulong ptr
, int data64
)
4255 int fpus
, fptag
, i
, nb_xmm_regs
;
4259 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4261 for(i
= 0; i
< 8; i
++) {
4262 fptag
|= (env
->fptags
[i
] << i
);
4264 stw(ptr
, env
->fpuc
);
4266 stw(ptr
+ 4, fptag
^ 0xff);
4267 #ifdef TARGET_X86_64
4269 stq(ptr
+ 0x08, 0); /* rip */
4270 stq(ptr
+ 0x10, 0); /* rdp */
4274 stl(ptr
+ 0x08, 0); /* eip */
4275 stl(ptr
+ 0x0c, 0); /* sel */
4276 stl(ptr
+ 0x10, 0); /* dp */
4277 stl(ptr
+ 0x14, 0); /* sel */
4281 for(i
= 0;i
< 8; i
++) {
4283 helper_fstt(tmp
, addr
);
4287 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4288 /* XXX: finish it */
4289 stl(ptr
+ 0x18, env
->mxcsr
); /* mxcsr */
4290 stl(ptr
+ 0x1c, 0x0000ffff); /* mxcsr_mask */
4291 if (env
->hflags
& HF_CS64_MASK
)
4296 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4297 stq(addr
, env
->xmm_regs
[i
].XMM_Q(0));
4298 stq(addr
+ 8, env
->xmm_regs
[i
].XMM_Q(1));
4304 void helper_fxrstor(target_ulong ptr
, int data64
)
4306 int i
, fpus
, fptag
, nb_xmm_regs
;
4310 env
->fpuc
= lduw(ptr
);
4311 fpus
= lduw(ptr
+ 2);
4312 fptag
= lduw(ptr
+ 4);
4313 env
->fpstt
= (fpus
>> 11) & 7;
4314 env
->fpus
= fpus
& ~0x3800;
4316 for(i
= 0;i
< 8; i
++) {
4317 env
->fptags
[i
] = ((fptag
>> i
) & 1);
4321 for(i
= 0;i
< 8; i
++) {
4322 tmp
= helper_fldt(addr
);
4327 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4328 /* XXX: finish it */
4329 env
->mxcsr
= ldl(ptr
+ 0x18);
4331 if (env
->hflags
& HF_CS64_MASK
)
4336 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4337 env
->xmm_regs
[i
].XMM_Q(0) = ldq(addr
);
4338 env
->xmm_regs
[i
].XMM_Q(1) = ldq(addr
+ 8);
4344 #ifndef USE_X86LDOUBLE
4346 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4348 CPU86_LDoubleU temp
;
4353 *pmant
= (MANTD(temp
) << 11) | (1LL << 63);
4354 /* exponent + sign */
4355 e
= EXPD(temp
) - EXPBIAS
+ 16383;
4356 e
|= SIGND(temp
) >> 16;
4360 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4362 CPU86_LDoubleU temp
;
4366 /* XXX: handle overflow ? */
4367 e
= (upper
& 0x7fff) - 16383 + EXPBIAS
; /* exponent */
4368 e
|= (upper
>> 4) & 0x800; /* sign */
4369 ll
= (mant
>> 11) & ((1LL << 52) - 1);
4371 temp
.l
.upper
= (e
<< 20) | (ll
>> 32);
4374 temp
.ll
= ll
| ((uint64_t)e
<< 52);
4381 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4383 CPU86_LDoubleU temp
;
4386 *pmant
= temp
.l
.lower
;
4387 *pexp
= temp
.l
.upper
;
4390 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4392 CPU86_LDoubleU temp
;
4394 temp
.l
.upper
= upper
;
4395 temp
.l
.lower
= mant
;
4400 #ifdef TARGET_X86_64
4402 //#define DEBUG_MULDIV
4404 static void add128(uint64_t *plow
, uint64_t *phigh
, uint64_t a
, uint64_t b
)
4413 static void neg128(uint64_t *plow
, uint64_t *phigh
)
4417 add128(plow
, phigh
, 1, 0);
4420 /* return TRUE if overflow */
4421 static int div64(uint64_t *plow
, uint64_t *phigh
, uint64_t b
)
4423 uint64_t q
, r
, a1
, a0
;
4436 /* XXX: use a better algorithm */
4437 for(i
= 0; i
< 64; i
++) {
4439 a1
= (a1
<< 1) | (a0
>> 63);
4440 if (ab
|| a1
>= b
) {
4446 a0
= (a0
<< 1) | qb
;
4448 #if defined(DEBUG_MULDIV)
4449 printf("div: 0x%016" PRIx64
"%016" PRIx64
" / 0x%016" PRIx64
": q=0x%016" PRIx64
" r=0x%016" PRIx64
"\n",
4450 *phigh
, *plow
, b
, a0
, a1
);
4458 /* return TRUE if overflow */
4459 static int idiv64(uint64_t *plow
, uint64_t *phigh
, int64_t b
)
4462 sa
= ((int64_t)*phigh
< 0);
4464 neg128(plow
, phigh
);
4468 if (div64(plow
, phigh
, b
) != 0)
4471 if (*plow
> (1ULL << 63))
4475 if (*plow
>= (1ULL << 63))
4483 void helper_mulq_EAX_T0(target_ulong t0
)
4487 mulu64(&r0
, &r1
, EAX
, t0
);
4494 void helper_imulq_EAX_T0(target_ulong t0
)
4498 muls64(&r0
, &r1
, EAX
, t0
);
4502 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4505 target_ulong
helper_imulq_T0_T1(target_ulong t0
, target_ulong t1
)
4509 muls64(&r0
, &r1
, t0
, t1
);
4511 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4515 void helper_divq_EAX(target_ulong t0
)
4519 raise_exception(EXCP00_DIVZ
);
4523 if (div64(&r0
, &r1
, t0
))
4524 raise_exception(EXCP00_DIVZ
);
4529 void helper_idivq_EAX(target_ulong t0
)
4533 raise_exception(EXCP00_DIVZ
);
4537 if (idiv64(&r0
, &r1
, t0
))
4538 raise_exception(EXCP00_DIVZ
);
4544 void helper_hlt(void)
4546 helper_svm_check_intercept_param(SVM_EXIT_HLT
, 0);
4548 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
; /* needed if sti is just before */
4550 env
->exception_index
= EXCP_HLT
;
4554 void helper_monitor(target_ulong ptr
)
4556 if ((uint32_t)ECX
!= 0)
4557 raise_exception(EXCP0D_GPF
);
4558 /* XXX: store address ? */
4559 helper_svm_check_intercept_param(SVM_EXIT_MONITOR
, 0);
4562 void helper_mwait(void)
4564 if ((uint32_t)ECX
!= 0)
4565 raise_exception(EXCP0D_GPF
);
4566 helper_svm_check_intercept_param(SVM_EXIT_MWAIT
, 0);
4567 /* XXX: not complete but not completely erroneous */
4568 if (env
->cpu_index
!= 0 || env
->next_cpu
!= NULL
) {
4569 /* more than one CPU: do not sleep because another CPU may
4576 void helper_debug(void)
4578 env
->exception_index
= EXCP_DEBUG
;
/* Raise a software interrupt 'intno'; next_eip_addend is added to EIP
   so the pushed return address points past the INT instruction. */
void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}
/* Raise the given CPU exception (no error code). */
void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}
4592 void helper_cli(void)
4594 env
->eflags
&= ~IF_MASK
;
4597 void helper_sti(void)
4599 env
->eflags
|= IF_MASK
;
4603 /* vm86plus instructions */
4604 void helper_cli_vm(void)
4606 env
->eflags
&= ~VIF_MASK
;
4609 void helper_sti_vm(void)
4611 env
->eflags
|= VIF_MASK
;
4612 if (env
->eflags
& VIP_MASK
) {
4613 raise_exception(EXCP0D_GPF
);
4618 void helper_set_inhibit_irq(void)
4620 env
->hflags
|= HF_INHIBIT_IRQ_MASK
;
4623 void helper_reset_inhibit_irq(void)
4625 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
4628 void helper_boundw(target_ulong a0
, int v
)
4632 high
= ldsw(a0
+ 2);
4634 if (v
< low
|| v
> high
) {
4635 raise_exception(EXCP05_BOUND
);
4640 void helper_boundl(target_ulong a0
, int v
)
4645 if (v
< low
|| v
> high
) {
4646 raise_exception(EXCP05_BOUND
);
4651 static float approx_rsqrt(float a
)
4653 return 1.0 / sqrt(a
);
4656 static float approx_rcp(float a
)
4661 #if !defined(CONFIG_USER_ONLY)
4663 #define MMUSUFFIX _mmu
4666 #include "softmmu_template.h"
4669 #include "softmmu_template.h"
4672 #include "softmmu_template.h"
4675 #include "softmmu_template.h"
4679 /* try to fill the TLB and return an exception if error. If retaddr is
4680 NULL, it means that the function was called in C code (i.e. not
4681 from generated code or from helper.c) */
4682 /* XXX: fix it to restore all registers */
4683 void tlb_fill(target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
4685 TranslationBlock
*tb
;
4688 CPUX86State
*saved_env
;
4690 /* XXX: hack to restore env in all cases, even if not called from
4693 env
= cpu_single_env
;
4695 ret
= cpu_x86_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
4698 /* now we have a real cpu fault */
4699 pc
= (unsigned long)retaddr
;
4700 tb
= tb_find_pc(pc
);
4702 /* the PC is inside the translated code. It means that we have
4703 a virtual CPU fault */
4704 cpu_restore_state(tb
, env
, pc
, NULL
);
4707 raise_exception_err(env
->exception_index
, env
->error_code
);
4713 /* Secure Virtual Machine helpers */
4715 #if defined(CONFIG_USER_ONLY)
4717 void helper_vmrun(void)
4720 void helper_vmmcall(void)
4723 void helper_vmload(void)
4726 void helper_vmsave(void)
4729 void helper_stgi(void)
4732 void helper_clgi(void)
4735 void helper_skinit(void)
4738 void helper_invlpga(void)
4741 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
4744 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
4748 void helper_svm_check_io(uint32_t port
, uint32_t param
,
4749 uint32_t next_eip_addend
)
4754 static inline void svm_save_seg(target_phys_addr_t addr
,
4755 const SegmentCache
*sc
)
4757 stw_phys(addr
+ offsetof(struct vmcb_seg
, selector
),
4759 stq_phys(addr
+ offsetof(struct vmcb_seg
, base
),
4761 stl_phys(addr
+ offsetof(struct vmcb_seg
, limit
),
4763 stw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
),
4764 (sc
->flags
>> 8) | ((sc
->flags
>> 12) & 0x0f00));
4767 static inline void svm_load_seg(target_phys_addr_t addr
, SegmentCache
*sc
)
4771 sc
->selector
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, selector
));
4772 sc
->base
= ldq_phys(addr
+ offsetof(struct vmcb_seg
, base
));
4773 sc
->limit
= ldl_phys(addr
+ offsetof(struct vmcb_seg
, limit
));
4774 flags
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
));
4775 sc
->flags
= ((flags
& 0xff) << 8) | ((flags
& 0x0f00) << 12);
4778 static inline void svm_load_seg_cache(target_phys_addr_t addr
,
4779 CPUState
*env
, int seg_reg
)
4781 SegmentCache sc1
, *sc
= &sc1
;
4782 svm_load_seg(addr
, sc
);
4783 cpu_x86_load_seg_cache(env
, seg_reg
, sc
->selector
,
4784 sc
->base
, sc
->limit
, sc
->flags
);
4787 void helper_vmrun(void)
4793 helper_svm_check_intercept_param(SVM_EXIT_VMRUN
, 0);
4796 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4797 fprintf(logfile
,"vmrun! " TARGET_FMT_lx
"\n", addr
);
4799 env
->vm_vmcb
= addr
;
4801 /* save the current CPU state in the hsave page */
4802 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
4803 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4805 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4806 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4808 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4809 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4810 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4811 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4812 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr8
), env
->cr
[8]);
4813 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4814 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4816 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4817 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4819 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
4821 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
4823 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
4825 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
4828 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
), EIP
);
4829 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4830 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4832 /* load the interception bitmaps so we do not need to access the
4834 env
->intercept
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
));
4835 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
4836 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
4837 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_read
));
4838 env
->intercept_dr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_write
));
4839 env
->intercept_exceptions
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_exceptions
));
4841 /* enable intercepts */
4842 env
->hflags
|= HF_SVMI_MASK
;
4844 env
->gdt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
));
4845 env
->gdt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
4847 env
->idt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
));
4848 env
->idt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
));
4850 /* clear exit_info_2 so we behave like the real hardware */
4851 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
4853 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
)));
4854 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
)));
4855 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
)));
4856 env
->cr
[2] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
4857 int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
4858 if (int_ctl
& V_INTR_MASKING_MASK
) {
4859 env
->cr
[8] = int_ctl
& V_TPR_MASK
;
4860 cpu_set_apic_tpr(env
, env
->cr
[8]);
4861 if (env
->eflags
& IF_MASK
)
4862 env
->hflags
|= HF_HIF_MASK
;
4865 #ifdef TARGET_X86_64
4866 env
->efer
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
));
4867 env
->hflags
&= ~HF_LMA_MASK
;
4868 if (env
->efer
& MSR_EFER_LMA
)
4869 env
->hflags
|= HF_LMA_MASK
;
4872 load_eflags(ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
)),
4873 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
4874 CC_OP
= CC_OP_EFLAGS
;
4876 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
4878 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
4880 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
4882 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
4885 EIP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
4887 ESP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
4888 EAX
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
4889 env
->dr
[7] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
4890 env
->dr
[6] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
4891 cpu_x86_set_cpl(env
, ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
)));
4893 /* FIXME: guest state consistency checks */
4895 switch(ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
4896 case TLB_CONTROL_DO_NOTHING
:
4898 case TLB_CONTROL_FLUSH_ALL_ASID
:
4899 /* FIXME: this is not 100% correct but should work for now */
4906 /* maybe we need to inject an event */
4907 event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
4908 if (event_inj
& SVM_EVTINJ_VALID
) {
4909 uint8_t vector
= event_inj
& SVM_EVTINJ_VEC_MASK
;
4910 uint16_t valid_err
= event_inj
& SVM_EVTINJ_VALID_ERR
;
4911 uint32_t event_inj_err
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
));
4912 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), event_inj
& ~SVM_EVTINJ_VALID
);
4914 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4915 fprintf(logfile
, "Injecting(%#hx): ", valid_err
);
4916 /* FIXME: need to implement valid_err */
4917 switch (event_inj
& SVM_EVTINJ_TYPE_MASK
) {
4918 case SVM_EVTINJ_TYPE_INTR
:
4919 env
->exception_index
= vector
;
4920 env
->error_code
= event_inj_err
;
4921 env
->exception_is_int
= 0;
4922 env
->exception_next_eip
= -1;
4923 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4924 fprintf(logfile
, "INTR");
4926 case SVM_EVTINJ_TYPE_NMI
:
4927 env
->exception_index
= vector
;
4928 env
->error_code
= event_inj_err
;
4929 env
->exception_is_int
= 0;
4930 env
->exception_next_eip
= EIP
;
4931 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4932 fprintf(logfile
, "NMI");
4934 case SVM_EVTINJ_TYPE_EXEPT
:
4935 env
->exception_index
= vector
;
4936 env
->error_code
= event_inj_err
;
4937 env
->exception_is_int
= 0;
4938 env
->exception_next_eip
= -1;
4939 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4940 fprintf(logfile
, "EXEPT");
4942 case SVM_EVTINJ_TYPE_SOFT
:
4943 env
->exception_index
= vector
;
4944 env
->error_code
= event_inj_err
;
4945 env
->exception_is_int
= 1;
4946 env
->exception_next_eip
= EIP
;
4947 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4948 fprintf(logfile
, "SOFT");
4951 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4952 fprintf(logfile
, " %#x %#x\n", env
->exception_index
, env
->error_code
);
4954 if ((int_ctl
& V_IRQ_MASK
) ||
4955 (env
->intercept
& (1ULL << (SVM_EXIT_INTR
- SVM_EXIT_INTR
)))) {
4956 env
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
4962 void helper_vmmcall(void)
4964 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL
, 0);
4965 raise_exception(EXCP06_ILLOP
);
4968 void helper_vmload(void)
4971 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD
, 0);
4973 /* XXX: invalid in 32 bit */
4975 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4976 fprintf(logfile
,"vmload! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
4977 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
4978 env
->segs
[R_FS
].base
);
4980 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.fs
),
4982 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.gs
),
4984 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
4986 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
4989 #ifdef TARGET_X86_64
4990 env
->kernelgsbase
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
));
4991 env
->lstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
));
4992 env
->cstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
));
4993 env
->fmask
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
));
4995 env
->star
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.star
));
4996 env
->sysenter_cs
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
));
4997 env
->sysenter_esp
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
));
4998 env
->sysenter_eip
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
));
5001 void helper_vmsave(void)
5004 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE
, 0);
5006 if (loglevel
& CPU_LOG_TB_IN_ASM
)
5007 fprintf(logfile
,"vmsave! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
5008 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
5009 env
->segs
[R_FS
].base
);
5011 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.fs
),
5013 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.gs
),
5015 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
5017 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
5020 #ifdef TARGET_X86_64
5021 stq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
), env
->kernelgsbase
);
5022 stq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
), env
->lstar
);
5023 stq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
), env
->cstar
);
5024 stq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
), env
->fmask
);
5026 stq_phys(addr
+ offsetof(struct vmcb
, save
.star
), env
->star
);
5027 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
), env
->sysenter_cs
);
5028 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
), env
->sysenter_esp
);
5029 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
), env
->sysenter_eip
);
5032 void helper_stgi(void)
5034 helper_svm_check_intercept_param(SVM_EXIT_STGI
, 0);
5035 env
->hflags
|= HF_GIF_MASK
;
5038 void helper_clgi(void)
5040 helper_svm_check_intercept_param(SVM_EXIT_CLGI
, 0);
5041 env
->hflags
&= ~HF_GIF_MASK
;
5044 void helper_skinit(void)
5046 helper_svm_check_intercept_param(SVM_EXIT_SKINIT
, 0);
5047 /* XXX: not implemented */
5048 if (loglevel
& CPU_LOG_TB_IN_ASM
)
5049 fprintf(logfile
,"skinit!\n");
5050 raise_exception(EXCP06_ILLOP
);
5053 void helper_invlpga(void)
5055 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA
, 0);
5059 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
5061 if (likely(!(env
->hflags
& HF_SVMI_MASK
)))
5064 case SVM_EXIT_READ_CR0
... SVM_EXIT_READ_CR0
+ 8:
5065 if (env
->intercept_cr_read
& (1 << (type
- SVM_EXIT_READ_CR0
))) {
5066 helper_vmexit(type
, param
);
5069 case SVM_EXIT_WRITE_CR0
... SVM_EXIT_WRITE_CR0
+ 8:
5070 if (env
->intercept_cr_write
& (1 << (type
- SVM_EXIT_WRITE_CR0
))) {
5071 helper_vmexit(type
, param
);
5074 case SVM_EXIT_READ_DR0
... SVM_EXIT_READ_DR0
+ 7:
5075 if (env
->intercept_dr_read
& (1 << (type
- SVM_EXIT_READ_DR0
))) {
5076 helper_vmexit(type
, param
);
5079 case SVM_EXIT_WRITE_DR0
... SVM_EXIT_WRITE_DR0
+ 7:
5080 if (env
->intercept_dr_write
& (1 << (type
- SVM_EXIT_WRITE_DR0
))) {
5081 helper_vmexit(type
, param
);
5084 case SVM_EXIT_EXCP_BASE
... SVM_EXIT_EXCP_BASE
+ 31:
5085 if (env
->intercept_exceptions
& (1 << (type
- SVM_EXIT_EXCP_BASE
))) {
5086 helper_vmexit(type
, param
);
5090 if (env
->intercept
& (1ULL << (SVM_EXIT_MSR
- SVM_EXIT_INTR
))) {
5091 /* FIXME: this should be read in at vmrun (faster this way?) */
5092 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.msrpm_base_pa
));
5094 switch((uint32_t)ECX
) {
5099 case 0xc0000000 ... 0xc0001fff:
5100 t0
= (8192 + ECX
- 0xc0000000) * 2;
5104 case 0xc0010000 ... 0xc0011fff:
5105 t0
= (16384 + ECX
- 0xc0010000) * 2;
5110 helper_vmexit(type
, param
);
5115 if (ldub_phys(addr
+ t1
) & ((1 << param
) << t0
))
5116 helper_vmexit(type
, param
);
5120 if (env
->intercept
& (1ULL << (type
- SVM_EXIT_INTR
))) {
5121 helper_vmexit(type
, param
);
5127 void helper_svm_check_io(uint32_t port
, uint32_t param
,
5128 uint32_t next_eip_addend
)
5130 if (env
->intercept
& (1ULL << (SVM_EXIT_IOIO
- SVM_EXIT_INTR
))) {
5131 /* FIXME: this should be read in at vmrun (faster this way?) */
5132 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.iopm_base_pa
));
5133 uint16_t mask
= (1 << ((param
>> 4) & 7)) - 1;
5134 if(lduw_phys(addr
+ port
/ 8) & (mask
<< (port
& 7))) {
5136 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
),
5137 env
->eip
+ next_eip_addend
);
5138 helper_vmexit(SVM_EXIT_IOIO
, param
| (port
<< 16));
5143 /* Note: currently only 32 bits of exit_code are used */
5144 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
5148 if (loglevel
& CPU_LOG_TB_IN_ASM
)
5149 fprintf(logfile
,"vmexit(%08x, %016" PRIx64
", %016" PRIx64
", " TARGET_FMT_lx
")!\n",
5150 exit_code
, exit_info_1
,
5151 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
)),
5154 if(env
->hflags
& HF_INHIBIT_IRQ_MASK
) {
5155 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), SVM_INTERRUPT_SHADOW_MASK
);
5156 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
5158 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), 0);
5161 /* Save the VM state in the vmcb */
5162 svm_save_seg(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
5164 svm_save_seg(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
5166 svm_save_seg(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
5168 svm_save_seg(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
5171 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
5172 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
5174 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
5175 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
5177 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
5178 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
5179 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
5180 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
5181 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
5183 if ((int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
))) & V_INTR_MASKING_MASK
) {
5184 int_ctl
&= ~V_TPR_MASK
;
5185 int_ctl
|= env
->cr
[8] & V_TPR_MASK
;
5186 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
), int_ctl
);
5189 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
5190 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
), env
->eip
);
5191 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
5192 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
), EAX
);
5193 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
5194 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
5195 stb_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
), env
->hflags
& HF_CPL_MASK
);
5197 /* Reload the host state from vm_hsave */
5198 env
->hflags
&= ~HF_HIF_MASK
;
5199 env
->hflags
&= ~HF_SVMI_MASK
;
5201 env
->intercept_exceptions
= 0;
5202 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
5204 env
->gdt
.base
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
));
5205 env
->gdt
.limit
= ldl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
5207 env
->idt
.base
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
));
5208 env
->idt
.limit
= ldl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
));
5210 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
)) | CR0_PE_MASK
);
5211 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
)));
5212 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
)));
5213 if (int_ctl
& V_INTR_MASKING_MASK
) {
5214 env
->cr
[8] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr8
));
5215 cpu_set_apic_tpr(env
, env
->cr
[8]);
5217 /* we need to set the efer after the crs so the hidden flags get set properly */
5218 #ifdef TARGET_X86_64
5219 env
->efer
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
));
5220 env
->hflags
&= ~HF_LMA_MASK
;
5221 if (env
->efer
& MSR_EFER_LMA
)
5222 env
->hflags
|= HF_LMA_MASK
;
5223 /* XXX: should also emulate the VM_CR MSR */
5224 env
->hflags
&= ~HF_SVME_MASK
;
5225 if (env
->cpuid_ext3_features
& CPUID_EXT3_SVM
) {
5226 if (env
->efer
& MSR_EFER_SVME
)
5227 env
->hflags
|= HF_SVME_MASK
;
5229 env
->efer
&= ~MSR_EFER_SVME
;
5234 load_eflags(ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
)),
5235 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
5236 CC_OP
= CC_OP_EFLAGS
;
5238 svm_load_seg_cache(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
5240 svm_load_seg_cache(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
5242 svm_load_seg_cache(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
5244 svm_load_seg_cache(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
5247 EIP
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
));
5248 ESP
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
));
5249 EAX
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
));
5251 env
->dr
[6] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
));
5252 env
->dr
[7] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
));
5255 cpu_x86_set_cpl(env
, 0);
5256 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_code
), exit_code
);
5257 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_1
), exit_info_1
);
5260 /* FIXME: Resets the current ASID register to zero (host ASID). */
5262 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5264 /* Clears the TSC_OFFSET inside the processor. */
5266 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5267 from the page table indicated the host's CR3. If the PDPEs contain
5268 illegal state, the processor causes a shutdown. */
5270 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5271 env
->cr
[0] |= CR0_PE_MASK
;
5272 env
->eflags
&= ~VM_MASK
;
5274 /* Disables all breakpoints in the host DR7 register. */
5276 /* Checks the reloaded host state for consistency. */
5278 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5279 host's code segment or non-canonical (in the case of long mode), a
5280 #GP fault is delivered inside the host.) */
5282 /* remove any pending exception */
5283 env
->exception_index
= -1;
5284 env
->error_code
= 0;
5285 env
->old_exception
= -1;
5293 /* XXX: optimize by storing fptt and fptags in the static cpu state */
5294 void helper_enter_mmx(void)
5297 *(uint32_t *)(env
->fptags
) = 0;
5298 *(uint32_t *)(env
->fptags
+ 4) = 0;
5301 void helper_emms(void)
5303 /* set to empty state */
5304 *(uint32_t *)(env
->fptags
) = 0x01010101;
5305 *(uint32_t *)(env
->fptags
+ 4) = 0x01010101;
5309 void helper_movq(uint64_t *d
, uint64_t *s
)
5315 #include "ops_sse.h"
5318 #include "ops_sse.h"
5321 #include "helper_template.h"
5325 #include "helper_template.h"
5329 #include "helper_template.h"
5332 #ifdef TARGET_X86_64
5335 #include "helper_template.h"
5340 /* bit operations */
5341 target_ulong
helper_bsf(target_ulong t0
)
5348 while ((res
& 1) == 0) {
5355 target_ulong
helper_bsr(target_ulong t0
)
5358 target_ulong res
, mask
;
5361 count
= TARGET_LONG_BITS
- 1;
5362 mask
= (target_ulong
)1 << (TARGET_LONG_BITS
- 1);
5363 while ((res
& mask
) == 0) {
5371 static int compute_all_eflags(void)
5376 static int compute_c_eflags(void)
5378 return CC_SRC
& CC_C
;
5381 CCTable cc_table
[CC_OP_NB
] = {
5382 [CC_OP_DYNAMIC
] = { /* should never happen */ },
5384 [CC_OP_EFLAGS
] = { compute_all_eflags
, compute_c_eflags
},
5386 [CC_OP_MULB
] = { compute_all_mulb
, compute_c_mull
},
5387 [CC_OP_MULW
] = { compute_all_mulw
, compute_c_mull
},
5388 [CC_OP_MULL
] = { compute_all_mull
, compute_c_mull
},
5390 [CC_OP_ADDB
] = { compute_all_addb
, compute_c_addb
},
5391 [CC_OP_ADDW
] = { compute_all_addw
, compute_c_addw
},
5392 [CC_OP_ADDL
] = { compute_all_addl
, compute_c_addl
},
5394 [CC_OP_ADCB
] = { compute_all_adcb
, compute_c_adcb
},
5395 [CC_OP_ADCW
] = { compute_all_adcw
, compute_c_adcw
},
5396 [CC_OP_ADCL
] = { compute_all_adcl
, compute_c_adcl
},
5398 [CC_OP_SUBB
] = { compute_all_subb
, compute_c_subb
},
5399 [CC_OP_SUBW
] = { compute_all_subw
, compute_c_subw
},
5400 [CC_OP_SUBL
] = { compute_all_subl
, compute_c_subl
},
5402 [CC_OP_SBBB
] = { compute_all_sbbb
, compute_c_sbbb
},
5403 [CC_OP_SBBW
] = { compute_all_sbbw
, compute_c_sbbw
},
5404 [CC_OP_SBBL
] = { compute_all_sbbl
, compute_c_sbbl
},
5406 [CC_OP_LOGICB
] = { compute_all_logicb
, compute_c_logicb
},
5407 [CC_OP_LOGICW
] = { compute_all_logicw
, compute_c_logicw
},
5408 [CC_OP_LOGICL
] = { compute_all_logicl
, compute_c_logicl
},
5410 [CC_OP_INCB
] = { compute_all_incb
, compute_c_incl
},
5411 [CC_OP_INCW
] = { compute_all_incw
, compute_c_incl
},
5412 [CC_OP_INCL
] = { compute_all_incl
, compute_c_incl
},
5414 [CC_OP_DECB
] = { compute_all_decb
, compute_c_incl
},
5415 [CC_OP_DECW
] = { compute_all_decw
, compute_c_incl
},
5416 [CC_OP_DECL
] = { compute_all_decl
, compute_c_incl
},
5418 [CC_OP_SHLB
] = { compute_all_shlb
, compute_c_shlb
},
5419 [CC_OP_SHLW
] = { compute_all_shlw
, compute_c_shlw
},
5420 [CC_OP_SHLL
] = { compute_all_shll
, compute_c_shll
},
5422 [CC_OP_SARB
] = { compute_all_sarb
, compute_c_sarl
},
5423 [CC_OP_SARW
] = { compute_all_sarw
, compute_c_sarl
},
5424 [CC_OP_SARL
] = { compute_all_sarl
, compute_c_sarl
},
5426 #ifdef TARGET_X86_64
5427 [CC_OP_MULQ
] = { compute_all_mulq
, compute_c_mull
},
5429 [CC_OP_ADDQ
] = { compute_all_addq
, compute_c_addq
},
5431 [CC_OP_ADCQ
] = { compute_all_adcq
, compute_c_adcq
},
5433 [CC_OP_SUBQ
] = { compute_all_subq
, compute_c_subq
},
5435 [CC_OP_SBBQ
] = { compute_all_sbbq
, compute_c_sbbq
},
5437 [CC_OP_LOGICQ
] = { compute_all_logicq
, compute_c_logicq
},
5439 [CC_OP_INCQ
] = { compute_all_incq
, compute_c_incl
},
5441 [CC_OP_DECQ
] = { compute_all_decq
, compute_c_incl
},
5443 [CC_OP_SHLQ
] = { compute_all_shlq
, compute_c_shlq
},
5445 [CC_OP_SARQ
] = { compute_all_sarq
, compute_c_sarl
},