4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #define CPU_NO_GLOBAL_REGS
22 #include "host-utils.h"
27 #define raise_exception_err(a, b)\
30 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
31 (raise_exception_err)(a, b);\
35 const uint8_t parity_table
[256] = {
36 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
37 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
38 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
39 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
40 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
41 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
42 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
43 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
44 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
45 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
46 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
47 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
48 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
49 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
50 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
51 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
52 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
53 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
54 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
55 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
56 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
57 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
58 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
59 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
60 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
61 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
62 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
63 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
64 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
65 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
66 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
67 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
/* Rotate-through-carry count table for 16-bit RCL/RCR: maps a masked
   rotate count (0..31) to the effective count modulo 17 (16 data bits
   plus the carry flag) -- the visible values run 0..16 and then wrap.
   NOTE(review): the closing "};" of this initializer is on a line
   missing from this extract; embedded numbers are source-dump artifacts. */
71 const uint8_t rclw_table
[32] = {
72 0, 1, 2, 3, 4, 5, 6, 7,
73 8, 9,10,11,12,13,14,15,
74 16, 0, 1, 2, 3, 4, 5, 6,
75 7, 8, 9,10,11,12,13,14,
/* Rotate-through-carry count table for 8-bit RCL/RCR: maps a masked
   rotate count (0..31) to the effective count modulo 9 (8 data bits
   plus the carry flag) -- the visible values run 0..8 and then wrap.
   NOTE(review): the closing "};" of this initializer is on a line
   missing from this extract. */
79 const uint8_t rclb_table
[32] = {
80 0, 1, 2, 3, 4, 5, 6, 7,
81 8, 0, 1, 2, 3, 4, 5, 6,
82 7, 8, 0, 1, 2, 3, 4, 5,
83 6, 7, 8, 0, 1, 2, 3, 4,
/* x87 FPU constants: 0.0, 1.0, pi, log10(2), ln(2), log2(e), log2(10),
   per the inline comments below -- presumably indexed by the FLDZ/FLD1/
   FLDPI/FLDLG2/FLDLN2/FLDL2E/FLDL2T constant-load helpers (TODO confirm;
   the consuming code is outside this extract).  NOTE(review): the "{"
   and "};" of this initializer are on lines missing from this extract. */
86 const CPU86_LDouble f15rk
[7] =
88 0.00000000000000000000L,
89 1.00000000000000000000L,
90 3.14159265358979323851L, /*pi*/
91 0.30102999566398119523L, /*lg2*/
92 0.69314718055994530943L, /*ln2*/
93 1.44269504088896340739L, /*l2e*/
94 3.32192809488736234781L, /*l2t*/
97 /* broken thread support */
/* Global lock taken by helper_lock() and released by helper_unlock()
   (the x86 LOCK-prefix helpers visible below); statically initialized
   to the unlocked state. */
99 spinlock_t global_cpu_lock
= SPIN_LOCK_UNLOCKED
;
/* Acquire the global CPU spinlock (see global_cpu_lock above).
   NOTE(review): the surrounding braces of this function body are on
   lines missing from this extract. */
101 void helper_lock(void)
103 spin_lock(&global_cpu_lock
);
/* Release the global CPU spinlock acquired by helper_lock().
   NOTE(review): the surrounding braces of this function body are on
   lines missing from this extract. */
106 void helper_unlock(void)
108 spin_unlock(&global_cpu_lock
);
/* Load EFLAGS from t0; only the bits selected by update_mask are
   written (delegates to load_eflags).  NOTE(review): function-body
   braces are on lines missing from this extract. */
111 void helper_write_eflags(target_ulong t0
, uint32_t update_mask
)
113 load_eflags(t0
, update_mask
);
/* Reconstruct the architectural EFLAGS value: the lazily-evaluated
   arithmetic flags come from cc_table[CC_OP].compute_all(), the
   direction flag from DF, and the remaining bits from env->eflags with
   VM and RF masked out.  NOTE(review): the local declaration of
   'eflags', the return statement, and the function braces are on lines
   missing from this extract. */
116 target_ulong
helper_read_eflags(void)
119 eflags
= cc_table
[CC_OP
].compute_all();
120 eflags
|= (DF
& DF_MASK
);
121 eflags
|= env
->eflags
& ~(VM_MASK
| RF_MASK
);
125 /* return non zero if error */
126 static inline int load_segment(uint32_t *e1_ptr
, uint32_t *e2_ptr
,
137 index
= selector
& ~7;
138 if ((index
+ 7) > dt
->limit
)
140 ptr
= dt
->base
+ index
;
141 *e1_ptr
= ldl_kernel(ptr
);
142 *e2_ptr
= ldl_kernel(ptr
+ 4);
146 static inline unsigned int get_seg_limit(uint32_t e1
, uint32_t e2
)
149 limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
150 if (e2
& DESC_G_MASK
)
151 limit
= (limit
<< 12) | 0xfff;
155 static inline uint32_t get_seg_base(uint32_t e1
, uint32_t e2
)
157 return ((e1
>> 16) | ((e2
& 0xff) << 16) | (e2
& 0xff000000));
160 static inline void load_seg_cache_raw_dt(SegmentCache
*sc
, uint32_t e1
, uint32_t e2
)
162 sc
->base
= get_seg_base(e1
, e2
);
163 sc
->limit
= get_seg_limit(e1
, e2
);
167 /* init the segment cache in vm86 mode. */
168 static inline void load_seg_vm(int seg
, int selector
)
171 cpu_x86_load_seg_cache(env
, seg
, selector
,
172 (selector
<< 4), 0xffff, 0);
175 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr
,
176 uint32_t *esp_ptr
, int dpl
)
178 int type
, index
, shift
;
183 printf("TR: base=%p limit=%x\n", env
->tr
.base
, env
->tr
.limit
);
184 for(i
=0;i
<env
->tr
.limit
;i
++) {
185 printf("%02x ", env
->tr
.base
[i
]);
186 if ((i
& 7) == 7) printf("\n");
192 if (!(env
->tr
.flags
& DESC_P_MASK
))
193 cpu_abort(env
, "invalid tss");
194 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
196 cpu_abort(env
, "invalid tss type");
198 index
= (dpl
* 4 + 2) << shift
;
199 if (index
+ (4 << shift
) - 1 > env
->tr
.limit
)
200 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
202 *esp_ptr
= lduw_kernel(env
->tr
.base
+ index
);
203 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 2);
205 *esp_ptr
= ldl_kernel(env
->tr
.base
+ index
);
206 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 4);
210 /* XXX: merge with load_seg() */
211 static void tss_load_seg(int seg_reg
, int selector
)
216 if ((selector
& 0xfffc) != 0) {
217 if (load_segment(&e1
, &e2
, selector
) != 0)
218 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
219 if (!(e2
& DESC_S_MASK
))
220 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
222 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
223 cpl
= env
->hflags
& HF_CPL_MASK
;
224 if (seg_reg
== R_CS
) {
225 if (!(e2
& DESC_CS_MASK
))
226 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
227 /* XXX: is it correct ? */
229 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
230 if ((e2
& DESC_C_MASK
) && dpl
> rpl
)
231 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
232 } else if (seg_reg
== R_SS
) {
233 /* SS must be writable data */
234 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
235 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
236 if (dpl
!= cpl
|| dpl
!= rpl
)
237 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
239 /* not readable code */
240 if ((e2
& DESC_CS_MASK
) && !(e2
& DESC_R_MASK
))
241 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
242 /* if data or non conforming code, checks the rights */
243 if (((e2
>> DESC_TYPE_SHIFT
) & 0xf) < 12) {
244 if (dpl
< cpl
|| dpl
< rpl
)
245 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
248 if (!(e2
& DESC_P_MASK
))
249 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
250 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
251 get_seg_base(e1
, e2
),
252 get_seg_limit(e1
, e2
),
255 if (seg_reg
== R_SS
|| seg_reg
== R_CS
)
256 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
260 #define SWITCH_TSS_JMP 0
261 #define SWITCH_TSS_IRET 1
262 #define SWITCH_TSS_CALL 2
264 /* XXX: restore CPU state in registers (PowerPC case) */
265 static void switch_tss(int tss_selector
,
266 uint32_t e1
, uint32_t e2
, int source
,
269 int tss_limit
, tss_limit_max
, type
, old_tss_limit_max
, old_type
, v1
, v2
, i
;
270 target_ulong tss_base
;
271 uint32_t new_regs
[8], new_segs
[6];
272 uint32_t new_eflags
, new_eip
, new_cr3
, new_ldt
, new_trap
;
273 uint32_t old_eflags
, eflags_mask
;
278 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
280 if (loglevel
& CPU_LOG_PCALL
)
281 fprintf(logfile
, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector
, type
, source
);
284 /* if task gate, we read the TSS segment and we load it */
286 if (!(e2
& DESC_P_MASK
))
287 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
288 tss_selector
= e1
>> 16;
289 if (tss_selector
& 4)
290 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
291 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
292 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
293 if (e2
& DESC_S_MASK
)
294 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
295 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
297 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
300 if (!(e2
& DESC_P_MASK
))
301 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
307 tss_limit
= get_seg_limit(e1
, e2
);
308 tss_base
= get_seg_base(e1
, e2
);
309 if ((tss_selector
& 4) != 0 ||
310 tss_limit
< tss_limit_max
)
311 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
312 old_type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
314 old_tss_limit_max
= 103;
316 old_tss_limit_max
= 43;
318 /* read all the registers from the new TSS */
321 new_cr3
= ldl_kernel(tss_base
+ 0x1c);
322 new_eip
= ldl_kernel(tss_base
+ 0x20);
323 new_eflags
= ldl_kernel(tss_base
+ 0x24);
324 for(i
= 0; i
< 8; i
++)
325 new_regs
[i
] = ldl_kernel(tss_base
+ (0x28 + i
* 4));
326 for(i
= 0; i
< 6; i
++)
327 new_segs
[i
] = lduw_kernel(tss_base
+ (0x48 + i
* 4));
328 new_ldt
= lduw_kernel(tss_base
+ 0x60);
329 new_trap
= ldl_kernel(tss_base
+ 0x64);
333 new_eip
= lduw_kernel(tss_base
+ 0x0e);
334 new_eflags
= lduw_kernel(tss_base
+ 0x10);
335 for(i
= 0; i
< 8; i
++)
336 new_regs
[i
] = lduw_kernel(tss_base
+ (0x12 + i
* 2)) | 0xffff0000;
337 for(i
= 0; i
< 4; i
++)
338 new_segs
[i
] = lduw_kernel(tss_base
+ (0x22 + i
* 4));
339 new_ldt
= lduw_kernel(tss_base
+ 0x2a);
345 /* NOTE: we must avoid memory exceptions during the task switch,
346 so we make dummy accesses before */
347 /* XXX: it can still fail in some cases, so a bigger hack is
348 necessary to valid the TLB after having done the accesses */
350 v1
= ldub_kernel(env
->tr
.base
);
351 v2
= ldub_kernel(env
->tr
.base
+ old_tss_limit_max
);
352 stb_kernel(env
->tr
.base
, v1
);
353 stb_kernel(env
->tr
.base
+ old_tss_limit_max
, v2
);
355 /* clear busy bit (it is restartable) */
356 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_IRET
) {
359 ptr
= env
->gdt
.base
+ (env
->tr
.selector
& ~7);
360 e2
= ldl_kernel(ptr
+ 4);
361 e2
&= ~DESC_TSS_BUSY_MASK
;
362 stl_kernel(ptr
+ 4, e2
);
364 old_eflags
= compute_eflags();
365 if (source
== SWITCH_TSS_IRET
)
366 old_eflags
&= ~NT_MASK
;
368 /* save the current state in the old TSS */
371 stl_kernel(env
->tr
.base
+ 0x20, next_eip
);
372 stl_kernel(env
->tr
.base
+ 0x24, old_eflags
);
373 stl_kernel(env
->tr
.base
+ (0x28 + 0 * 4), EAX
);
374 stl_kernel(env
->tr
.base
+ (0x28 + 1 * 4), ECX
);
375 stl_kernel(env
->tr
.base
+ (0x28 + 2 * 4), EDX
);
376 stl_kernel(env
->tr
.base
+ (0x28 + 3 * 4), EBX
);
377 stl_kernel(env
->tr
.base
+ (0x28 + 4 * 4), ESP
);
378 stl_kernel(env
->tr
.base
+ (0x28 + 5 * 4), EBP
);
379 stl_kernel(env
->tr
.base
+ (0x28 + 6 * 4), ESI
);
380 stl_kernel(env
->tr
.base
+ (0x28 + 7 * 4), EDI
);
381 for(i
= 0; i
< 6; i
++)
382 stw_kernel(env
->tr
.base
+ (0x48 + i
* 4), env
->segs
[i
].selector
);
385 stw_kernel(env
->tr
.base
+ 0x0e, next_eip
);
386 stw_kernel(env
->tr
.base
+ 0x10, old_eflags
);
387 stw_kernel(env
->tr
.base
+ (0x12 + 0 * 2), EAX
);
388 stw_kernel(env
->tr
.base
+ (0x12 + 1 * 2), ECX
);
389 stw_kernel(env
->tr
.base
+ (0x12 + 2 * 2), EDX
);
390 stw_kernel(env
->tr
.base
+ (0x12 + 3 * 2), EBX
);
391 stw_kernel(env
->tr
.base
+ (0x12 + 4 * 2), ESP
);
392 stw_kernel(env
->tr
.base
+ (0x12 + 5 * 2), EBP
);
393 stw_kernel(env
->tr
.base
+ (0x12 + 6 * 2), ESI
);
394 stw_kernel(env
->tr
.base
+ (0x12 + 7 * 2), EDI
);
395 for(i
= 0; i
< 4; i
++)
396 stw_kernel(env
->tr
.base
+ (0x22 + i
* 4), env
->segs
[i
].selector
);
399 /* now if an exception occurs, it will occurs in the next task
402 if (source
== SWITCH_TSS_CALL
) {
403 stw_kernel(tss_base
, env
->tr
.selector
);
404 new_eflags
|= NT_MASK
;
408 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_CALL
) {
411 ptr
= env
->gdt
.base
+ (tss_selector
& ~7);
412 e2
= ldl_kernel(ptr
+ 4);
413 e2
|= DESC_TSS_BUSY_MASK
;
414 stl_kernel(ptr
+ 4, e2
);
417 /* set the new CPU state */
418 /* from this point, any exception which occurs can give problems */
419 env
->cr
[0] |= CR0_TS_MASK
;
420 env
->hflags
|= HF_TS_MASK
;
421 env
->tr
.selector
= tss_selector
;
422 env
->tr
.base
= tss_base
;
423 env
->tr
.limit
= tss_limit
;
424 env
->tr
.flags
= e2
& ~DESC_TSS_BUSY_MASK
;
426 if ((type
& 8) && (env
->cr
[0] & CR0_PG_MASK
)) {
427 cpu_x86_update_cr3(env
, new_cr3
);
430 /* load all registers without an exception, then reload them with
431 possible exception */
433 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
|
434 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
;
436 eflags_mask
&= 0xffff;
437 load_eflags(new_eflags
, eflags_mask
);
438 /* XXX: what to do in 16 bit case ? */
447 if (new_eflags
& VM_MASK
) {
448 for(i
= 0; i
< 6; i
++)
449 load_seg_vm(i
, new_segs
[i
]);
450 /* in vm86, CPL is always 3 */
451 cpu_x86_set_cpl(env
, 3);
453 /* CPL is set the RPL of CS */
454 cpu_x86_set_cpl(env
, new_segs
[R_CS
] & 3);
455 /* first just selectors as the rest may trigger exceptions */
456 for(i
= 0; i
< 6; i
++)
457 cpu_x86_load_seg_cache(env
, i
, new_segs
[i
], 0, 0, 0);
460 env
->ldt
.selector
= new_ldt
& ~4;
467 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
469 if ((new_ldt
& 0xfffc) != 0) {
471 index
= new_ldt
& ~7;
472 if ((index
+ 7) > dt
->limit
)
473 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
474 ptr
= dt
->base
+ index
;
475 e1
= ldl_kernel(ptr
);
476 e2
= ldl_kernel(ptr
+ 4);
477 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
478 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
479 if (!(e2
& DESC_P_MASK
))
480 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
481 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
484 /* load the segments */
485 if (!(new_eflags
& VM_MASK
)) {
486 tss_load_seg(R_CS
, new_segs
[R_CS
]);
487 tss_load_seg(R_SS
, new_segs
[R_SS
]);
488 tss_load_seg(R_ES
, new_segs
[R_ES
]);
489 tss_load_seg(R_DS
, new_segs
[R_DS
]);
490 tss_load_seg(R_FS
, new_segs
[R_FS
]);
491 tss_load_seg(R_GS
, new_segs
[R_GS
]);
494 /* check that EIP is in the CS segment limits */
495 if (new_eip
> env
->segs
[R_CS
].limit
) {
496 /* XXX: different exception if CALL ? */
497 raise_exception_err(EXCP0D_GPF
, 0);
501 /* check if Port I/O is allowed in TSS */
502 static inline void check_io(int addr
, int size
)
504 int io_offset
, val
, mask
;
506 /* TSS must be a valid 32 bit one */
507 if (!(env
->tr
.flags
& DESC_P_MASK
) ||
508 ((env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf) != 9 ||
511 io_offset
= lduw_kernel(env
->tr
.base
+ 0x66);
512 io_offset
+= (addr
>> 3);
513 /* Note: the check needs two bytes */
514 if ((io_offset
+ 1) > env
->tr
.limit
)
516 val
= lduw_kernel(env
->tr
.base
+ io_offset
);
518 mask
= (1 << size
) - 1;
519 /* all bits must be zero to allow the I/O */
520 if ((val
& mask
) != 0) {
522 raise_exception_err(EXCP0D_GPF
, 0);
526 void helper_check_iob(uint32_t t0
)
531 void helper_check_iow(uint32_t t0
)
536 void helper_check_iol(uint32_t t0
)
/* Write the low byte of 'data' to I/O port 'port' via cpu_outb.
   NOTE(review): function-body braces are on lines missing from this
   extract. */
541 void helper_outb(uint32_t port
, uint32_t data
)
543 cpu_outb(env
, port
, data
& 0xff);
/* Read one byte from I/O port 'port' via cpu_inb and return it.
   NOTE(review): function-body braces are on lines missing from this
   extract. */
546 target_ulong
helper_inb(uint32_t port
)
548 return cpu_inb(env
, port
);
/* Write the low 16 bits of 'data' to I/O port 'port' via cpu_outw.
   NOTE(review): function-body braces are on lines missing from this
   extract. */
551 void helper_outw(uint32_t port
, uint32_t data
)
553 cpu_outw(env
, port
, data
& 0xffff);
/* Read a 16-bit word from I/O port 'port' via cpu_inw and return it.
   NOTE(review): function-body braces are on lines missing from this
   extract. */
556 target_ulong
helper_inw(uint32_t port
)
558 return cpu_inw(env
, port
);
/* Write the 32-bit 'data' to I/O port 'port' via cpu_outl (no masking
   needed -- data is already uint32_t).  NOTE(review): function-body
   braces are on lines missing from this extract. */
561 void helper_outl(uint32_t port
, uint32_t data
)
563 cpu_outl(env
, port
, data
);
/* Read a 32-bit longword from I/O port 'port' via cpu_inl and return it.
   NOTE(review): function-body braces are on lines missing from this
   extract. */
566 target_ulong
helper_inl(uint32_t port
)
568 return cpu_inl(env
, port
);
571 static inline unsigned int get_sp_mask(unsigned int e2
)
573 if (e2
& DESC_B_MASK
)
580 #define SET_ESP(val, sp_mask)\
582 if ((sp_mask) == 0xffff)\
583 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
584 else if ((sp_mask) == 0xffffffffLL)\
585 ESP = (uint32_t)(val);\
590 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
593 /* XXX: add a is_user flag to have proper security support */
594 #define PUSHW(ssp, sp, sp_mask, val)\
597 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
600 #define PUSHL(ssp, sp, sp_mask, val)\
603 stl_kernel((ssp) + (sp & (sp_mask)), (val));\
606 #define POPW(ssp, sp, sp_mask, val)\
608 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
612 #define POPL(ssp, sp, sp_mask, val)\
614 val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
618 /* protected mode interrupt */
619 static void do_interrupt_protected(int intno
, int is_int
, int error_code
,
620 unsigned int next_eip
, int is_hw
)
623 target_ulong ptr
, ssp
;
624 int type
, dpl
, selector
, ss_dpl
, cpl
;
625 int has_error_code
, new_stack
, shift
;
626 uint32_t e1
, e2
, offset
, ss
, esp
, ss_e1
, ss_e2
;
627 uint32_t old_eip
, sp_mask
;
630 if (!is_int
&& !is_hw
) {
649 if (intno
* 8 + 7 > dt
->limit
)
650 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
651 ptr
= dt
->base
+ intno
* 8;
652 e1
= ldl_kernel(ptr
);
653 e2
= ldl_kernel(ptr
+ 4);
654 /* check gate type */
655 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
657 case 5: /* task gate */
658 /* must do that check here to return the correct error code */
659 if (!(e2
& DESC_P_MASK
))
660 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
661 switch_tss(intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
662 if (has_error_code
) {
665 /* push the error code */
666 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
668 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
)
672 esp
= (ESP
- (2 << shift
)) & mask
;
673 ssp
= env
->segs
[R_SS
].base
+ esp
;
675 stl_kernel(ssp
, error_code
);
677 stw_kernel(ssp
, error_code
);
681 case 6: /* 286 interrupt gate */
682 case 7: /* 286 trap gate */
683 case 14: /* 386 interrupt gate */
684 case 15: /* 386 trap gate */
687 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
690 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
691 cpl
= env
->hflags
& HF_CPL_MASK
;
692 /* check privilege if software int */
693 if (is_int
&& dpl
< cpl
)
694 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
695 /* check valid bit */
696 if (!(e2
& DESC_P_MASK
))
697 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
699 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
700 if ((selector
& 0xfffc) == 0)
701 raise_exception_err(EXCP0D_GPF
, 0);
703 if (load_segment(&e1
, &e2
, selector
) != 0)
704 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
705 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
706 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
707 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
709 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
710 if (!(e2
& DESC_P_MASK
))
711 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
712 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
713 /* to inner privilege */
714 get_ss_esp_from_tss(&ss
, &esp
, dpl
);
715 if ((ss
& 0xfffc) == 0)
716 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
718 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
719 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
720 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
721 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
723 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
724 if (!(ss_e2
& DESC_S_MASK
) ||
725 (ss_e2
& DESC_CS_MASK
) ||
726 !(ss_e2
& DESC_W_MASK
))
727 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
728 if (!(ss_e2
& DESC_P_MASK
))
729 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
731 sp_mask
= get_sp_mask(ss_e2
);
732 ssp
= get_seg_base(ss_e1
, ss_e2
);
733 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
734 /* to same privilege */
735 if (env
->eflags
& VM_MASK
)
736 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
738 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
739 ssp
= env
->segs
[R_SS
].base
;
743 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
744 new_stack
= 0; /* avoid warning */
745 sp_mask
= 0; /* avoid warning */
746 ssp
= 0; /* avoid warning */
747 esp
= 0; /* avoid warning */
753 /* XXX: check that enough room is available */
754 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
755 if (env
->eflags
& VM_MASK
)
761 if (env
->eflags
& VM_MASK
) {
762 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
763 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
764 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
765 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
767 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
768 PUSHL(ssp
, esp
, sp_mask
, ESP
);
770 PUSHL(ssp
, esp
, sp_mask
, compute_eflags());
771 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
772 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
773 if (has_error_code
) {
774 PUSHL(ssp
, esp
, sp_mask
, error_code
);
778 if (env
->eflags
& VM_MASK
) {
779 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
780 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
781 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
782 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
784 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
785 PUSHW(ssp
, esp
, sp_mask
, ESP
);
787 PUSHW(ssp
, esp
, sp_mask
, compute_eflags());
788 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
789 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
790 if (has_error_code
) {
791 PUSHW(ssp
, esp
, sp_mask
, error_code
);
796 if (env
->eflags
& VM_MASK
) {
797 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
798 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
799 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
800 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
802 ss
= (ss
& ~3) | dpl
;
803 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
804 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
806 SET_ESP(esp
, sp_mask
);
808 selector
= (selector
& ~3) | dpl
;
809 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
810 get_seg_base(e1
, e2
),
811 get_seg_limit(e1
, e2
),
813 cpu_x86_set_cpl(env
, dpl
);
816 /* interrupt gate clear IF mask */
817 if ((type
& 1) == 0) {
818 env
->eflags
&= ~IF_MASK
;
820 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
825 #define PUSHQ(sp, val)\
828 stq_kernel(sp, (val));\
831 #define POPQ(sp, val)\
833 val = ldq_kernel(sp);\
837 static inline target_ulong
get_rsp_from_tss(int level
)
842 printf("TR: base=" TARGET_FMT_lx
" limit=%x\n",
843 env
->tr
.base
, env
->tr
.limit
);
846 if (!(env
->tr
.flags
& DESC_P_MASK
))
847 cpu_abort(env
, "invalid tss");
848 index
= 8 * level
+ 4;
849 if ((index
+ 7) > env
->tr
.limit
)
850 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
851 return ldq_kernel(env
->tr
.base
+ index
);
854 /* 64 bit interrupt */
855 static void do_interrupt64(int intno
, int is_int
, int error_code
,
856 target_ulong next_eip
, int is_hw
)
860 int type
, dpl
, selector
, cpl
, ist
;
861 int has_error_code
, new_stack
;
862 uint32_t e1
, e2
, e3
, ss
;
863 target_ulong old_eip
, esp
, offset
;
866 if (!is_int
&& !is_hw
) {
885 if (intno
* 16 + 15 > dt
->limit
)
886 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
887 ptr
= dt
->base
+ intno
* 16;
888 e1
= ldl_kernel(ptr
);
889 e2
= ldl_kernel(ptr
+ 4);
890 e3
= ldl_kernel(ptr
+ 8);
891 /* check gate type */
892 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
894 case 14: /* 386 interrupt gate */
895 case 15: /* 386 trap gate */
898 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
901 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
902 cpl
= env
->hflags
& HF_CPL_MASK
;
903 /* check privilege if software int */
904 if (is_int
&& dpl
< cpl
)
905 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
906 /* check valid bit */
907 if (!(e2
& DESC_P_MASK
))
908 raise_exception_err(EXCP0B_NOSEG
, intno
* 16 + 2);
910 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
912 if ((selector
& 0xfffc) == 0)
913 raise_exception_err(EXCP0D_GPF
, 0);
915 if (load_segment(&e1
, &e2
, selector
) != 0)
916 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
917 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
918 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
919 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
921 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
922 if (!(e2
& DESC_P_MASK
))
923 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
924 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
))
925 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
926 if ((!(e2
& DESC_C_MASK
) && dpl
< cpl
) || ist
!= 0) {
927 /* to inner privilege */
929 esp
= get_rsp_from_tss(ist
+ 3);
931 esp
= get_rsp_from_tss(dpl
);
932 esp
&= ~0xfLL
; /* align stack */
935 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
936 /* to same privilege */
937 if (env
->eflags
& VM_MASK
)
938 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
941 esp
= get_rsp_from_tss(ist
+ 3);
944 esp
&= ~0xfLL
; /* align stack */
947 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
948 new_stack
= 0; /* avoid warning */
949 esp
= 0; /* avoid warning */
952 PUSHQ(esp
, env
->segs
[R_SS
].selector
);
954 PUSHQ(esp
, compute_eflags());
955 PUSHQ(esp
, env
->segs
[R_CS
].selector
);
957 if (has_error_code
) {
958 PUSHQ(esp
, error_code
);
963 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
967 selector
= (selector
& ~3) | dpl
;
968 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
969 get_seg_base(e1
, e2
),
970 get_seg_limit(e1
, e2
),
972 cpu_x86_set_cpl(env
, dpl
);
975 /* interrupt gate clear IF mask */
976 if ((type
& 1) == 0) {
977 env
->eflags
&= ~IF_MASK
;
979 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
983 #if defined(CONFIG_USER_ONLY)
984 void helper_syscall(int next_eip_addend
)
986 env
->exception_index
= EXCP_SYSCALL
;
987 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
991 void helper_syscall(int next_eip_addend
)
995 if (!(env
->efer
& MSR_EFER_SCE
)) {
996 raise_exception_err(EXCP06_ILLOP
, 0);
998 selector
= (env
->star
>> 32) & 0xffff;
1000 if (env
->hflags
& HF_LMA_MASK
) {
1003 ECX
= env
->eip
+ next_eip_addend
;
1004 env
->regs
[11] = compute_eflags();
1006 code64
= env
->hflags
& HF_CS64_MASK
;
1008 cpu_x86_set_cpl(env
, 0);
1009 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1011 DESC_G_MASK
| DESC_P_MASK
|
1013 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
1014 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1016 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1018 DESC_W_MASK
| DESC_A_MASK
);
1019 env
->eflags
&= ~env
->fmask
;
1020 load_eflags(env
->eflags
, 0);
1022 env
->eip
= env
->lstar
;
1024 env
->eip
= env
->cstar
;
1028 ECX
= (uint32_t)(env
->eip
+ next_eip_addend
);
1030 cpu_x86_set_cpl(env
, 0);
1031 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1033 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1035 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1036 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1038 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1040 DESC_W_MASK
| DESC_A_MASK
);
1041 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
1042 env
->eip
= (uint32_t)env
->star
;
1047 void helper_sysret(int dflag
)
1051 if (!(env
->efer
& MSR_EFER_SCE
)) {
1052 raise_exception_err(EXCP06_ILLOP
, 0);
1054 cpl
= env
->hflags
& HF_CPL_MASK
;
1055 if (!(env
->cr
[0] & CR0_PE_MASK
) || cpl
!= 0) {
1056 raise_exception_err(EXCP0D_GPF
, 0);
1058 selector
= (env
->star
>> 48) & 0xffff;
1059 #ifdef TARGET_X86_64
1060 if (env
->hflags
& HF_LMA_MASK
) {
1062 cpu_x86_load_seg_cache(env
, R_CS
, (selector
+ 16) | 3,
1064 DESC_G_MASK
| DESC_P_MASK
|
1065 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1066 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1070 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1072 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1073 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1074 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1075 env
->eip
= (uint32_t)ECX
;
1077 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1079 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1080 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1081 DESC_W_MASK
| DESC_A_MASK
);
1082 load_eflags((uint32_t)(env
->regs
[11]), TF_MASK
| AC_MASK
| ID_MASK
|
1083 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
1084 cpu_x86_set_cpl(env
, 3);
1088 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1090 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1091 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1092 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1093 env
->eip
= (uint32_t)ECX
;
1094 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1096 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1097 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1098 DESC_W_MASK
| DESC_A_MASK
);
1099 env
->eflags
|= IF_MASK
;
1100 cpu_x86_set_cpl(env
, 3);
1103 if (kqemu_is_ok(env
)) {
1104 if (env
->hflags
& HF_LMA_MASK
)
1105 CC_OP
= CC_OP_EFLAGS
;
1106 env
->exception_index
= -1;
1112 /* real mode interrupt */
1113 static void do_interrupt_real(int intno
, int is_int
, int error_code
,
1114 unsigned int next_eip
)
1117 target_ulong ptr
, ssp
;
1119 uint32_t offset
, esp
;
1120 uint32_t old_cs
, old_eip
;
1122 /* real mode (simpler !) */
1124 if (intno
* 4 + 3 > dt
->limit
)
1125 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
1126 ptr
= dt
->base
+ intno
* 4;
1127 offset
= lduw_kernel(ptr
);
1128 selector
= lduw_kernel(ptr
+ 2);
1130 ssp
= env
->segs
[R_SS
].base
;
1135 old_cs
= env
->segs
[R_CS
].selector
;
1136 /* XXX: use SS segment size ? */
1137 PUSHW(ssp
, esp
, 0xffff, compute_eflags());
1138 PUSHW(ssp
, esp
, 0xffff, old_cs
);
1139 PUSHW(ssp
, esp
, 0xffff, old_eip
);
1141 /* update processor state */
1142 ESP
= (ESP
& ~0xffff) | (esp
& 0xffff);
1144 env
->segs
[R_CS
].selector
= selector
;
1145 env
->segs
[R_CS
].base
= (selector
<< 4);
1146 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
1149 /* fake user mode interrupt */
1150 void do_interrupt_user(int intno
, int is_int
, int error_code
,
1151 target_ulong next_eip
)
1155 int dpl
, cpl
, shift
;
1159 if (env
->hflags
& HF_LMA_MASK
) {
1164 ptr
= dt
->base
+ (intno
<< shift
);
1165 e2
= ldl_kernel(ptr
+ 4);
1167 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1168 cpl
= env
->hflags
& HF_CPL_MASK
;
1169 /* check privilege if software int */
1170 if (is_int
&& dpl
< cpl
)
1171 raise_exception_err(EXCP0D_GPF
, (intno
<< shift
) + 2);
1173 /* Since we emulate only user space, we cannot do more than
1174 exiting the emulation with the suitable exception and error
1181 * Begin execution of an interruption. is_int is TRUE if coming from
1182 * the int instruction. next_eip is the EIP value AFTER the interrupt
1183 * instruction. It is only relevant if is_int is TRUE.
1185 void do_interrupt(int intno
, int is_int
, int error_code
,
1186 target_ulong next_eip
, int is_hw
)
1188 if (loglevel
& CPU_LOG_INT
) {
1189 if ((env
->cr
[0] & CR0_PE_MASK
)) {
1191 fprintf(logfile
, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
" pc=" TARGET_FMT_lx
" SP=%04x:" TARGET_FMT_lx
,
1192 count
, intno
, error_code
, is_int
,
1193 env
->hflags
& HF_CPL_MASK
,
1194 env
->segs
[R_CS
].selector
, EIP
,
1195 (int)env
->segs
[R_CS
].base
+ EIP
,
1196 env
->segs
[R_SS
].selector
, ESP
);
1197 if (intno
== 0x0e) {
1198 fprintf(logfile
, " CR2=" TARGET_FMT_lx
, env
->cr
[2]);
1200 fprintf(logfile
, " EAX=" TARGET_FMT_lx
, EAX
);
1202 fprintf(logfile
, "\n");
1203 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
1208 fprintf(logfile
, " code=");
1209 ptr
= env
->segs
[R_CS
].base
+ env
->eip
;
1210 for(i
= 0; i
< 16; i
++) {
1211 fprintf(logfile
, " %02x", ldub(ptr
+ i
));
1213 fprintf(logfile
, "\n");
1219 if (env
->cr
[0] & CR0_PE_MASK
) {
1221 if (env
->hflags
& HF_LMA_MASK
) {
1222 do_interrupt64(intno
, is_int
, error_code
, next_eip
, is_hw
);
1226 do_interrupt_protected(intno
, is_int
, error_code
, next_eip
, is_hw
);
1229 do_interrupt_real(intno
, is_int
, error_code
, next_eip
);
1234 * Check nested exceptions and change to double or triple fault if
1235 * needed. It should only be called, if this is not an interrupt.
1236 * Returns the new exception number.
/* Classify a nested exception per the x86 double-fault rules:
 * a second "contributory" exception (vectors 0, 10-13) raised while a
 * contributory or page-fault exception is pending is promoted to #DF
 * (EXCP08_DBLE), and an exception during #DF delivery aborts as a
 * triple fault.  Returns the (possibly promoted) vector.
 * NOTE(review): gaps in the embedded line numbering show this listing
 * dropped lines (at least the error-code reset and the return
 * statement); the text below is not the complete function. */
1238 static int check_exception(int intno
, int *error_code
)
/* First class: the exception already being delivered (env->old_exception). */
1240 int first_contributory
= env
->old_exception
== 0 ||
1241 (env
->old_exception
>= 10 &&
1242 env
->old_exception
<= 13);
/* Second class: the newly raised vector. */
1243 int second_contributory
= intno
== 0 ||
1244 (intno
>= 10 && intno
<= 13);
1246 if (loglevel
& CPU_LOG_INT
)
1247 fprintf(logfile
, "check_exception old: 0x%x new 0x%x\n",
1248 env
->old_exception
, intno
);
/* A fault while delivering #DF is a triple fault: give up (real
 * hardware would shut down / reset). */
1250 if (env
->old_exception
== EXCP08_DBLE
)
1251 cpu_abort(env
, "triple fault");
/* contributory+contributory, or page fault followed by contributory
 * or another page fault, promotes to a double fault. */
1253 if ((first_contributory
&& second_contributory
)
1254 || (env
->old_exception
== EXCP0E_PAGE
&&
1255 (second_contributory
|| (intno
== EXCP0E_PAGE
)))) {
1256 intno
= EXCP08_DBLE
;
/* Remember this vector so a further nested fault can be classified. */
1260 if (second_contributory
|| (intno
== EXCP0E_PAGE
) ||
1261 (intno
== EXCP08_DBLE
))
1262 env
->old_exception
= intno
;
1268 * Signal an interruption. It is executed in the main CPU loop.
1269 * is_int is TRUE if coming from the int instruction. next_eip is the
1270 * EIP value AFTER the interrupt instruction. It is only relevant if
/* Record an exception/interrupt in the CPU state so the main CPU loop
 * delivers it.  is_int is non-zero when coming from an INT instruction;
 * next_eip_addend is added to env->eip to form the EIP saved on the
 * stack (the address AFTER the raising instruction).
 * NOTE(review): gaps in the embedded numbering indicate dropped lines
 * (the is_int branch structure and the final cpu_loop_exit() are not
 * visible here); the text below is not the complete function. */
1273 void raise_interrupt(int intno
, int is_int
, int error_code
,
1274 int next_eip_addend
)
/* SVM: an exception may be intercepted by the hypervisor... */
1277 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE
+ intno
, error_code
);
/* ...and may be promoted to #DF/#TF by the nested-exception rules. */
1278 intno
= check_exception(intno
, &error_code
);
/* SVM software-interrupt intercept (INT n path). */
1280 helper_svm_check_intercept_param(SVM_EXIT_SWINT
, 0);
/* Stash everything the main loop needs to deliver the event. */
1283 env
->exception_index
= intno
;
1284 env
->error_code
= error_code
;
1285 env
->exception_is_int
= is_int
;
1286 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
1290 /* shortcuts to generate exceptions */
/* Raise an exception that carries an error code (e.g. #GP, #TS).
 * The parenthesized function name keeps the debug logging macro of
 * the same name (defined at the top of this file) from expanding
 * inside its own definition. */
void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
/* Raise an exception that carries no error code (e.g. #DE, #UD). */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
1304 #if defined(CONFIG_USER_ONLY)
1306 void do_smm_enter(void)
1310 void helper_rsm(void)
1316 #ifdef TARGET_X86_64
1317 #define SMM_REVISION_ID 0x00020064
1319 #define SMM_REVISION_ID 0x00020000
1322 void do_smm_enter(void)
1324 target_ulong sm_state
;
1328 if (loglevel
& CPU_LOG_INT
) {
1329 fprintf(logfile
, "SMM: enter\n");
1330 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
1333 env
->hflags
|= HF_SMM_MASK
;
1334 cpu_smm_update(env
);
1336 sm_state
= env
->smbase
+ 0x8000;
1338 #ifdef TARGET_X86_64
1339 for(i
= 0; i
< 6; i
++) {
1341 offset
= 0x7e00 + i
* 16;
1342 stw_phys(sm_state
+ offset
, dt
->selector
);
1343 stw_phys(sm_state
+ offset
+ 2, (dt
->flags
>> 8) & 0xf0ff);
1344 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1345 stq_phys(sm_state
+ offset
+ 8, dt
->base
);
1348 stq_phys(sm_state
+ 0x7e68, env
->gdt
.base
);
1349 stl_phys(sm_state
+ 0x7e64, env
->gdt
.limit
);
1351 stw_phys(sm_state
+ 0x7e70, env
->ldt
.selector
);
1352 stq_phys(sm_state
+ 0x7e78, env
->ldt
.base
);
1353 stl_phys(sm_state
+ 0x7e74, env
->ldt
.limit
);
1354 stw_phys(sm_state
+ 0x7e72, (env
->ldt
.flags
>> 8) & 0xf0ff);
1356 stq_phys(sm_state
+ 0x7e88, env
->idt
.base
);
1357 stl_phys(sm_state
+ 0x7e84, env
->idt
.limit
);
1359 stw_phys(sm_state
+ 0x7e90, env
->tr
.selector
);
1360 stq_phys(sm_state
+ 0x7e98, env
->tr
.base
);
1361 stl_phys(sm_state
+ 0x7e94, env
->tr
.limit
);
1362 stw_phys(sm_state
+ 0x7e92, (env
->tr
.flags
>> 8) & 0xf0ff);
1364 stq_phys(sm_state
+ 0x7ed0, env
->efer
);
1366 stq_phys(sm_state
+ 0x7ff8, EAX
);
1367 stq_phys(sm_state
+ 0x7ff0, ECX
);
1368 stq_phys(sm_state
+ 0x7fe8, EDX
);
1369 stq_phys(sm_state
+ 0x7fe0, EBX
);
1370 stq_phys(sm_state
+ 0x7fd8, ESP
);
1371 stq_phys(sm_state
+ 0x7fd0, EBP
);
1372 stq_phys(sm_state
+ 0x7fc8, ESI
);
1373 stq_phys(sm_state
+ 0x7fc0, EDI
);
1374 for(i
= 8; i
< 16; i
++)
1375 stq_phys(sm_state
+ 0x7ff8 - i
* 8, env
->regs
[i
]);
1376 stq_phys(sm_state
+ 0x7f78, env
->eip
);
1377 stl_phys(sm_state
+ 0x7f70, compute_eflags());
1378 stl_phys(sm_state
+ 0x7f68, env
->dr
[6]);
1379 stl_phys(sm_state
+ 0x7f60, env
->dr
[7]);
1381 stl_phys(sm_state
+ 0x7f48, env
->cr
[4]);
1382 stl_phys(sm_state
+ 0x7f50, env
->cr
[3]);
1383 stl_phys(sm_state
+ 0x7f58, env
->cr
[0]);
1385 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1386 stl_phys(sm_state
+ 0x7f00, env
->smbase
);
1388 stl_phys(sm_state
+ 0x7ffc, env
->cr
[0]);
1389 stl_phys(sm_state
+ 0x7ff8, env
->cr
[3]);
1390 stl_phys(sm_state
+ 0x7ff4, compute_eflags());
1391 stl_phys(sm_state
+ 0x7ff0, env
->eip
);
1392 stl_phys(sm_state
+ 0x7fec, EDI
);
1393 stl_phys(sm_state
+ 0x7fe8, ESI
);
1394 stl_phys(sm_state
+ 0x7fe4, EBP
);
1395 stl_phys(sm_state
+ 0x7fe0, ESP
);
1396 stl_phys(sm_state
+ 0x7fdc, EBX
);
1397 stl_phys(sm_state
+ 0x7fd8, EDX
);
1398 stl_phys(sm_state
+ 0x7fd4, ECX
);
1399 stl_phys(sm_state
+ 0x7fd0, EAX
);
1400 stl_phys(sm_state
+ 0x7fcc, env
->dr
[6]);
1401 stl_phys(sm_state
+ 0x7fc8, env
->dr
[7]);
1403 stl_phys(sm_state
+ 0x7fc4, env
->tr
.selector
);
1404 stl_phys(sm_state
+ 0x7f64, env
->tr
.base
);
1405 stl_phys(sm_state
+ 0x7f60, env
->tr
.limit
);
1406 stl_phys(sm_state
+ 0x7f5c, (env
->tr
.flags
>> 8) & 0xf0ff);
1408 stl_phys(sm_state
+ 0x7fc0, env
->ldt
.selector
);
1409 stl_phys(sm_state
+ 0x7f80, env
->ldt
.base
);
1410 stl_phys(sm_state
+ 0x7f7c, env
->ldt
.limit
);
1411 stl_phys(sm_state
+ 0x7f78, (env
->ldt
.flags
>> 8) & 0xf0ff);
1413 stl_phys(sm_state
+ 0x7f74, env
->gdt
.base
);
1414 stl_phys(sm_state
+ 0x7f70, env
->gdt
.limit
);
1416 stl_phys(sm_state
+ 0x7f58, env
->idt
.base
);
1417 stl_phys(sm_state
+ 0x7f54, env
->idt
.limit
);
1419 for(i
= 0; i
< 6; i
++) {
1422 offset
= 0x7f84 + i
* 12;
1424 offset
= 0x7f2c + (i
- 3) * 12;
1425 stl_phys(sm_state
+ 0x7fa8 + i
* 4, dt
->selector
);
1426 stl_phys(sm_state
+ offset
+ 8, dt
->base
);
1427 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1428 stl_phys(sm_state
+ offset
, (dt
->flags
>> 8) & 0xf0ff);
1430 stl_phys(sm_state
+ 0x7f14, env
->cr
[4]);
1432 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1433 stl_phys(sm_state
+ 0x7ef8, env
->smbase
);
1435 /* init SMM cpu state */
1437 #ifdef TARGET_X86_64
1439 env
->hflags
&= ~HF_LMA_MASK
;
1441 load_eflags(0, ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1442 env
->eip
= 0x00008000;
1443 cpu_x86_load_seg_cache(env
, R_CS
, (env
->smbase
>> 4) & 0xffff, env
->smbase
,
1445 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0xffffffff, 0);
1446 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0xffffffff, 0);
1447 cpu_x86_load_seg_cache(env
, R_SS
, 0, 0, 0xffffffff, 0);
1448 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0xffffffff, 0);
1449 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0xffffffff, 0);
1451 cpu_x86_update_cr0(env
,
1452 env
->cr
[0] & ~(CR0_PE_MASK
| CR0_EM_MASK
| CR0_TS_MASK
| CR0_PG_MASK
));
1453 cpu_x86_update_cr4(env
, 0);
1454 env
->dr
[7] = 0x00000400;
1455 CC_OP
= CC_OP_EFLAGS
;
1458 void helper_rsm(void)
1460 target_ulong sm_state
;
1464 sm_state
= env
->smbase
+ 0x8000;
1465 #ifdef TARGET_X86_64
1466 env
->efer
= ldq_phys(sm_state
+ 0x7ed0);
1467 if (env
->efer
& MSR_EFER_LMA
)
1468 env
->hflags
|= HF_LMA_MASK
;
1470 env
->hflags
&= ~HF_LMA_MASK
;
1472 for(i
= 0; i
< 6; i
++) {
1473 offset
= 0x7e00 + i
* 16;
1474 cpu_x86_load_seg_cache(env
, i
,
1475 lduw_phys(sm_state
+ offset
),
1476 ldq_phys(sm_state
+ offset
+ 8),
1477 ldl_phys(sm_state
+ offset
+ 4),
1478 (lduw_phys(sm_state
+ offset
+ 2) & 0xf0ff) << 8);
1481 env
->gdt
.base
= ldq_phys(sm_state
+ 0x7e68);
1482 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7e64);
1484 env
->ldt
.selector
= lduw_phys(sm_state
+ 0x7e70);
1485 env
->ldt
.base
= ldq_phys(sm_state
+ 0x7e78);
1486 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7e74);
1487 env
->ldt
.flags
= (lduw_phys(sm_state
+ 0x7e72) & 0xf0ff) << 8;
1489 env
->idt
.base
= ldq_phys(sm_state
+ 0x7e88);
1490 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7e84);
1492 env
->tr
.selector
= lduw_phys(sm_state
+ 0x7e90);
1493 env
->tr
.base
= ldq_phys(sm_state
+ 0x7e98);
1494 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7e94);
1495 env
->tr
.flags
= (lduw_phys(sm_state
+ 0x7e92) & 0xf0ff) << 8;
1497 EAX
= ldq_phys(sm_state
+ 0x7ff8);
1498 ECX
= ldq_phys(sm_state
+ 0x7ff0);
1499 EDX
= ldq_phys(sm_state
+ 0x7fe8);
1500 EBX
= ldq_phys(sm_state
+ 0x7fe0);
1501 ESP
= ldq_phys(sm_state
+ 0x7fd8);
1502 EBP
= ldq_phys(sm_state
+ 0x7fd0);
1503 ESI
= ldq_phys(sm_state
+ 0x7fc8);
1504 EDI
= ldq_phys(sm_state
+ 0x7fc0);
1505 for(i
= 8; i
< 16; i
++)
1506 env
->regs
[i
] = ldq_phys(sm_state
+ 0x7ff8 - i
* 8);
1507 env
->eip
= ldq_phys(sm_state
+ 0x7f78);
1508 load_eflags(ldl_phys(sm_state
+ 0x7f70),
1509 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1510 env
->dr
[6] = ldl_phys(sm_state
+ 0x7f68);
1511 env
->dr
[7] = ldl_phys(sm_state
+ 0x7f60);
1513 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f48));
1514 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7f50));
1515 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7f58));
1517 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1518 if (val
& 0x20000) {
1519 env
->smbase
= ldl_phys(sm_state
+ 0x7f00) & ~0x7fff;
1522 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7ffc));
1523 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7ff8));
1524 load_eflags(ldl_phys(sm_state
+ 0x7ff4),
1525 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1526 env
->eip
= ldl_phys(sm_state
+ 0x7ff0);
1527 EDI
= ldl_phys(sm_state
+ 0x7fec);
1528 ESI
= ldl_phys(sm_state
+ 0x7fe8);
1529 EBP
= ldl_phys(sm_state
+ 0x7fe4);
1530 ESP
= ldl_phys(sm_state
+ 0x7fe0);
1531 EBX
= ldl_phys(sm_state
+ 0x7fdc);
1532 EDX
= ldl_phys(sm_state
+ 0x7fd8);
1533 ECX
= ldl_phys(sm_state
+ 0x7fd4);
1534 EAX
= ldl_phys(sm_state
+ 0x7fd0);
1535 env
->dr
[6] = ldl_phys(sm_state
+ 0x7fcc);
1536 env
->dr
[7] = ldl_phys(sm_state
+ 0x7fc8);
1538 env
->tr
.selector
= ldl_phys(sm_state
+ 0x7fc4) & 0xffff;
1539 env
->tr
.base
= ldl_phys(sm_state
+ 0x7f64);
1540 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7f60);
1541 env
->tr
.flags
= (ldl_phys(sm_state
+ 0x7f5c) & 0xf0ff) << 8;
1543 env
->ldt
.selector
= ldl_phys(sm_state
+ 0x7fc0) & 0xffff;
1544 env
->ldt
.base
= ldl_phys(sm_state
+ 0x7f80);
1545 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7f7c);
1546 env
->ldt
.flags
= (ldl_phys(sm_state
+ 0x7f78) & 0xf0ff) << 8;
1548 env
->gdt
.base
= ldl_phys(sm_state
+ 0x7f74);
1549 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7f70);
1551 env
->idt
.base
= ldl_phys(sm_state
+ 0x7f58);
1552 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7f54);
1554 for(i
= 0; i
< 6; i
++) {
1556 offset
= 0x7f84 + i
* 12;
1558 offset
= 0x7f2c + (i
- 3) * 12;
1559 cpu_x86_load_seg_cache(env
, i
,
1560 ldl_phys(sm_state
+ 0x7fa8 + i
* 4) & 0xffff,
1561 ldl_phys(sm_state
+ offset
+ 8),
1562 ldl_phys(sm_state
+ offset
+ 4),
1563 (ldl_phys(sm_state
+ offset
) & 0xf0ff) << 8);
1565 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f14));
1567 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1568 if (val
& 0x20000) {
1569 env
->smbase
= ldl_phys(sm_state
+ 0x7ef8) & ~0x7fff;
1572 CC_OP
= CC_OP_EFLAGS
;
1573 env
->hflags
&= ~HF_SMM_MASK
;
1574 cpu_smm_update(env
);
1576 if (loglevel
& CPU_LOG_INT
) {
1577 fprintf(logfile
, "SMM: after RSM\n");
1578 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
1582 #endif /* !CONFIG_USER_ONLY */
1585 /* division, flags are undefined */
/* DIV r/m8: unsigned divide AX by the low byte of t0; quotient to AL,
 * remainder to AH.  Raises #DE (EXCP00_DIVZ) on divide-by-zero and,
 * per the two raise sites below, presumably also on quotient overflow
 * (> 0xff) -- the divisor/quotient computation lines were dropped by
 * this extraction (note the jumps in the embedded line numbers), so
 * that second condition cannot be confirmed from here. */
1587 void helper_divb_AL(target_ulong t0
)
1589 unsigned int num
, den
, q
, r
;
/* Dividend is the full 16-bit AX. */
1591 num
= (EAX
& 0xffff);
1594 raise_exception(EXCP00_DIVZ
);
1598 raise_exception(EXCP00_DIVZ
);
1600 r
= (num
% den
) & 0xff;
/* Pack remainder into AH and quotient into AL, preserving upper EAX. */
1601 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
/* IDIV r/m8: signed divide AX by the low byte of t0; quotient to AL,
 * remainder to AH.  Two #DE raise sites are visible (divide-by-zero
 * and, presumably, signed-quotient overflow), but the declarations
 * and the division itself were dropped by this extraction -- the
 * embedded line numbers jump from 1604 to 1611. */
1604 void helper_idivb_AL(target_ulong t0
)
1611 raise_exception(EXCP00_DIVZ
);
1615 raise_exception(EXCP00_DIVZ
);
1617 r
= (num
% den
) & 0xff;
/* Pack remainder into AH and quotient into AL, preserving upper EAX. */
1618 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
/* DIV r/m16: unsigned divide DX:AX by the low word of t0; quotient to
 * AX, remainder to DX.  Two #DE raise sites are visible; the quotient
 * computation and its overflow test were dropped by this extraction
 * (embedded numbering jumps 1626 -> 1628 -> 1632). */
1621 void helper_divw_AX(target_ulong t0
)
1623 unsigned int num
, den
, q
, r
;
/* 32-bit dividend assembled from DX (high) and AX (low). */
1625 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1626 den
= (t0
& 0xffff);
1628 raise_exception(EXCP00_DIVZ
);
1632 raise_exception(EXCP00_DIVZ
);
1634 r
= (num
% den
) & 0xffff;
/* Write back quotient to AX and remainder to DX. */
1635 EAX
= (EAX
& ~0xffff) | q
;
1636 EDX
= (EDX
& ~0xffff) | r
;
/* IDIV r/m16: signed divide DX:AX by the low word of t0; quotient to
 * AX, remainder to DX.  Raises #DE on divide-by-zero and when the
 * quotient does not fit in a signed 16-bit value (the visible
 * q != (int16_t)q test).  Declarations and the division itself were
 * dropped by this extraction (numbering jumps 1643 -> 1646 -> 1649). */
1639 void helper_idivw_AX(target_ulong t0
)
1643 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1646 raise_exception(EXCP00_DIVZ
);
/* Signed-overflow check: quotient must fit in int16_t. */
1649 if (q
!= (int16_t)q
)
1650 raise_exception(EXCP00_DIVZ
);
1652 r
= (num
% den
) & 0xffff;
/* Write back quotient to AX and remainder to DX. */
1653 EAX
= (EAX
& ~0xffff) | q
;
1654 EDX
= (EDX
& ~0xffff) | r
;
/* DIV r/m32: unsigned divide EDX:EAX by t0; quotient to EAX, remainder
 * to EDX (the write-back lines were dropped by this extraction --
 * numbering jumps past 1670).  Two #DE raise sites are visible
 * (divide-by-zero and, presumably, quotient overflow > 0xffffffff). */
1657 void helper_divl_EAX(target_ulong t0
)
1659 unsigned int den
, r
;
/* 64-bit dividend assembled from EDX (high) and EAX (low). */
1662 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1665 raise_exception(EXCP00_DIVZ
);
1670 raise_exception(EXCP00_DIVZ
);
/* IDIV r/m32: signed divide EDX:EAX by t0.  Raises #DE on
 * divide-by-zero and when the quotient does not fit in a signed
 * 32-bit value (the visible q != (int32_t)q test).  Declarations,
 * the division, and the EAX/EDX write-back were dropped by this
 * extraction (numbering jumps past 1688). */
1675 void helper_idivl_EAX(target_ulong t0
)
1680 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1683 raise_exception(EXCP00_DIVZ
);
/* Signed-overflow check: quotient must fit in int32_t. */
1687 if (q
!= (int32_t)q
)
1688 raise_exception(EXCP00_DIVZ
);
1695 /* XXX: exception */
/* AAM: ASCII adjust AX after multiply -- splits AL into base-`base`
 * digits (base is the instruction's immediate, normally 10).  Only
 * the final AX write-back survived this extraction (embedded
 * numbering jumps 1696 -> 1702); the division of AL by base and the
 * flag update are missing from this view. */
1696 void helper_aam(int base
)
/* AH = AL / base, AL = AL % base, packed back into AX. */
1702 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
/* AAD: ASCII adjust AX before divide -- AL = (AH * base + AL) & 0xff,
 * AH = 0 (the AH clear is implied by writing only `al` into the low
 * 16 bits below).  base is the instruction's immediate, normally 10.
 * The local declarations and the initial AL extraction were dropped
 * by this extraction (numbering jumps 1706 -> 1710). */
1706 void helper_aad(int base
)
1710 ah
= (EAX
>> 8) & 0xff;
1711 al
= ((ah
* base
) + al
) & 0xff;
/* AX = result in AL, AH zeroed. */
1712 EAX
= (EAX
& ~0xffff) | al
;
/* AAA: ASCII adjust AL after addition.  If the low nibble of AL is
 * > 9 or AF is set, add 6 to the low nibble, bump AH (plus the
 * visible icarry correction for AL > 0xf9), and set CF and AF;
 * otherwise clear CF and AF.  Local declarations and the AL/AF
 * extraction lines were dropped by this extraction (numbering jumps
 * 1716 -> 1722 and 1725 -> 1727). */
1716 void helper_aaa(void)
/* Materialize the lazy flags before reading/updating AF and CF. */
1722 eflags
= cc_table
[CC_OP
].compute_all();
1725 ah
= (EAX
>> 8) & 0xff;
/* Incrementing AL by 6 would carry out of the byte when AL > 0xf9. */
1727 icarry
= (al
> 0xf9);
1728 if (((al
& 0x0f) > 9 ) || af
) {
1729 al
= (al
+ 6) & 0x0f;
1730 ah
= (ah
+ 1 + icarry
) & 0xff;
1731 eflags
|= CC_C
| CC_A
;
1733 eflags
&= ~(CC_C
| CC_A
);
/* Write the adjusted AX back, preserving the upper half of EAX. */
1736 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
/* AAS: ASCII adjust AL after subtraction -- mirror image of AAA: on a
 * decimal borrow (low nibble of AL > 9 or AF set) subtract 6 from the
 * low nibble, decrement AH (minus the icarry borrow correction), and
 * set CF/AF; otherwise clear them.  Declarations and the AL/AF/icarry
 * extraction lines were dropped by this extraction (numbering jumps
 * 1741 -> 1747 and 1750 -> 1753). */
1741 void helper_aas(void)
/* Materialize the lazy flags before reading/updating AF and CF. */
1747 eflags
= cc_table
[CC_OP
].compute_all();
1750 ah
= (EAX
>> 8) & 0xff;
1753 if (((al
& 0x0f) > 9 ) || af
) {
1754 al
= (al
- 6) & 0x0f;
1755 ah
= (ah
- 1 - icarry
) & 0xff;
1756 eflags
|= CC_C
| CC_A
;
1758 eflags
&= ~(CC_C
| CC_A
);
/* Write the adjusted AX back, preserving the upper half of EAX. */
1761 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
/* DAA: decimal adjust AL after addition (packed BCD).  Adds 6 to AL
 * on a low-nibble overflow (AF or nibble > 9) and 0x60 on a
 * high-nibble overflow (CF or AL > 0x9f), then recomputes ZF/PF/SF
 * by hand.  Declarations, the AL/AF/CF extraction, and the CF/AF
 * flag-setting lines inside the branches were dropped by this
 * extraction (numbering jumps 1766 -> 1771 -> 1777 and 1778 -> 1781). */
1766 void helper_daa(void)
/* Materialize the lazy flags before reading AF and CF. */
1771 eflags
= cc_table
[CC_OP
].compute_all();
1777 if (((al
& 0x0f) > 9 ) || af
) {
1778 al
= (al
+ 6) & 0xff;
1781 if ((al
> 0x9f) || cf
) {
1782 al
= (al
+ 0x60) & 0xff;
/* Only AL is written back; the rest of EAX is preserved. */
1785 EAX
= (EAX
& ~0xff) | al
;
1786 /* well, speed is not an issue here, so we compute the flags by hand */
1787 eflags
|= (al
== 0) << 6; /* zf */
1788 eflags
|= parity_table
[al
]; /* pf */
1789 eflags
|= (al
& 0x80); /* sf */
/* DAS: decimal adjust AL after subtraction (packed BCD) -- mirror of
 * DAA: subtracts 6 on a low-nibble borrow and 0x60 on a high-nibble
 * borrow (note the test against al1, presumably the pre-adjustment
 * AL saved before the first subtraction -- its assignment was dropped
 * by this extraction), then recomputes ZF/PF/SF by hand.  Several
 * interior lines are missing (numbering jumps 1799 -> 1806,
 * 1806 -> 1810, 1813 -> 1816). */
1794 void helper_das(void)
1796 int al
, al1
, af
, cf
;
/* Materialize the lazy flags before reading AF and CF. */
1799 eflags
= cc_table
[CC_OP
].compute_all();
1806 if (((al
& 0x0f) > 9 ) || af
) {
1810 al
= (al
- 6) & 0xff;
1812 if ((al1
> 0x99) || cf
) {
1813 al
= (al
- 0x60) & 0xff;
/* Only AL is written back; the rest of EAX is preserved. */
1816 EAX
= (EAX
& ~0xff) | al
;
1817 /* well, speed is not an issue here, so we compute the flags by hand */
1818 eflags
|= (al
== 0) << 6; /* zf */
1819 eflags
|= parity_table
[al
]; /* pf */
1820 eflags
|= (al
& 0x80); /* sf */
1825 void helper_into(int next_eip_addend
)
1828 eflags
= cc_table
[CC_OP
].compute_all();
1829 if (eflags
& CC_O
) {
1830 raise_interrupt(EXCP04_INTO
, 1, 0, next_eip_addend
);
/* CMPXCHG8B m64: compare EDX:EAX with the 64-bit memory operand at a0;
 * on match store ECX:EBX there, otherwise load the operand into
 * EDX:EAX.  The load of d from memory, the else branch, the ZF
 * update, and CC_SRC write-back were dropped by this extraction
 * (numbering jumps 1834 -> 1839, 1842 -> 1845). */
1834 void helper_cmpxchg8b(target_ulong a0
)
/* Materialize the lazy flags so ZF can be updated in place. */
1839 eflags
= cc_table
[CC_OP
].compute_all();
1841 if (d
== (((uint64_t)EDX
<< 32) | (uint32_t)EAX
)) {
/* Match: write ECX:EBX to memory. */
1842 stq(a0
, ((uint64_t)ECX
<< 32) | (uint32_t)EBX
);
/* Mismatch path (else dropped by extraction): load value into EDX:EAX. */
1845 EDX
= (uint32_t)(d
>> 32);
1852 #ifdef TARGET_X86_64
/* CMPXCHG16B m128 (x86-64 only): compare RDX:RAX with the 128-bit
 * memory operand at a0.  Only the flag materialization and the
 * comparison head survived this extraction (numbering jumps
 * 1853 -> 1858 and everything after 1861 is missing): the loads of
 * d0/d1, both branches, and the ZF write-back are not visible here. */
1853 void helper_cmpxchg16b(target_ulong a0
)
1858 eflags
= cc_table
[CC_OP
].compute_all();
1861 if (d0
== EAX
&& d1
== EDX
) {
1874 void helper_single_step(void)
1876 env
->dr
[6] |= 0x4000;
1877 raise_exception(EXCP01_SSTP
);
1880 void helper_cpuid(void)
1884 helper_svm_check_intercept_param(SVM_EXIT_CPUID
, 0);
1886 index
= (uint32_t)EAX
;
1887 /* test if maximum index reached */
1888 if (index
& 0x80000000) {
1889 if (index
> env
->cpuid_xlevel
)
1890 index
= env
->cpuid_level
;
1892 if (index
> env
->cpuid_level
)
1893 index
= env
->cpuid_level
;
1898 EAX
= env
->cpuid_level
;
1899 EBX
= env
->cpuid_vendor1
;
1900 EDX
= env
->cpuid_vendor2
;
1901 ECX
= env
->cpuid_vendor3
;
1904 EAX
= env
->cpuid_version
;
1905 EBX
= (env
->cpuid_apic_id
<< 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1906 ECX
= env
->cpuid_ext_features
;
1907 EDX
= env
->cpuid_features
;
1910 /* cache info: needed for Pentium Pro compatibility */
1917 EAX
= env
->cpuid_xlevel
;
1918 EBX
= env
->cpuid_vendor1
;
1919 EDX
= env
->cpuid_vendor2
;
1920 ECX
= env
->cpuid_vendor3
;
1923 EAX
= env
->cpuid_features
;
1925 ECX
= env
->cpuid_ext3_features
;
1926 EDX
= env
->cpuid_ext2_features
;
1931 EAX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 0];
1932 EBX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 1];
1933 ECX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 2];
1934 EDX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 3];
1937 /* cache info (L1 cache) */
1944 /* cache info (L2 cache) */
1951 /* virtual & phys address size in low 2 bytes. */
1952 /* XXX: This value must match the one used in the MMU code. */
1953 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
) {
1954 /* 64 bit processor */
1955 #if defined(USE_KQEMU)
1956 EAX
= 0x00003020; /* 48 bits virtual, 32 bits physical */
1958 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1959 EAX
= 0x00003028; /* 48 bits virtual, 40 bits physical */
1962 #if defined(USE_KQEMU)
1963 EAX
= 0x00000020; /* 32 bits physical */
1965 EAX
= 0x00000024; /* 36 bits physical */
1979 /* reserved values: zero */
1988 void helper_enter_level(int level
, int data32
, target_ulong t1
)
1991 uint32_t esp_mask
, esp
, ebp
;
1993 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1994 ssp
= env
->segs
[R_SS
].base
;
2003 stl(ssp
+ (esp
& esp_mask
), ldl(ssp
+ (ebp
& esp_mask
)));
2006 stl(ssp
+ (esp
& esp_mask
), t1
);
2013 stw(ssp
+ (esp
& esp_mask
), lduw(ssp
+ (ebp
& esp_mask
)));
2016 stw(ssp
+ (esp
& esp_mask
), t1
);
2020 #ifdef TARGET_X86_64
2021 void helper_enter64_level(int level
, int data64
, target_ulong t1
)
2023 target_ulong esp
, ebp
;
2043 stw(esp
, lduw(ebp
));
2051 void helper_lldt(int selector
)
2055 int index
, entry_limit
;
2059 if ((selector
& 0xfffc) == 0) {
2060 /* XXX: NULL selector case: invalid LDT */
2065 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2067 index
= selector
& ~7;
2068 #ifdef TARGET_X86_64
2069 if (env
->hflags
& HF_LMA_MASK
)
2074 if ((index
+ entry_limit
) > dt
->limit
)
2075 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2076 ptr
= dt
->base
+ index
;
2077 e1
= ldl_kernel(ptr
);
2078 e2
= ldl_kernel(ptr
+ 4);
2079 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
2080 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2081 if (!(e2
& DESC_P_MASK
))
2082 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2083 #ifdef TARGET_X86_64
2084 if (env
->hflags
& HF_LMA_MASK
) {
2086 e3
= ldl_kernel(ptr
+ 8);
2087 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2088 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
2092 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2095 env
->ldt
.selector
= selector
;
2098 void helper_ltr(int selector
)
2102 int index
, type
, entry_limit
;
2106 if ((selector
& 0xfffc) == 0) {
2107 /* NULL selector case: invalid TR */
2113 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2115 index
= selector
& ~7;
2116 #ifdef TARGET_X86_64
2117 if (env
->hflags
& HF_LMA_MASK
)
2122 if ((index
+ entry_limit
) > dt
->limit
)
2123 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2124 ptr
= dt
->base
+ index
;
2125 e1
= ldl_kernel(ptr
);
2126 e2
= ldl_kernel(ptr
+ 4);
2127 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2128 if ((e2
& DESC_S_MASK
) ||
2129 (type
!= 1 && type
!= 9))
2130 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2131 if (!(e2
& DESC_P_MASK
))
2132 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2133 #ifdef TARGET_X86_64
2134 if (env
->hflags
& HF_LMA_MASK
) {
2136 e3
= ldl_kernel(ptr
+ 8);
2137 e4
= ldl_kernel(ptr
+ 12);
2138 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf)
2139 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2140 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2141 env
->tr
.base
|= (target_ulong
)e3
<< 32;
2145 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2147 e2
|= DESC_TSS_BUSY_MASK
;
2148 stl_kernel(ptr
+ 4, e2
);
2150 env
->tr
.selector
= selector
;
2153 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2154 void helper_load_seg(int seg_reg
, int selector
)
2163 cpl
= env
->hflags
& HF_CPL_MASK
;
2164 if ((selector
& 0xfffc) == 0) {
2165 /* null selector case */
2167 #ifdef TARGET_X86_64
2168 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
2171 raise_exception_err(EXCP0D_GPF
, 0);
2172 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
2179 index
= selector
& ~7;
2180 if ((index
+ 7) > dt
->limit
)
2181 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2182 ptr
= dt
->base
+ index
;
2183 e1
= ldl_kernel(ptr
);
2184 e2
= ldl_kernel(ptr
+ 4);
2186 if (!(e2
& DESC_S_MASK
))
2187 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2189 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2190 if (seg_reg
== R_SS
) {
2191 /* must be writable segment */
2192 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
2193 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2194 if (rpl
!= cpl
|| dpl
!= cpl
)
2195 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2197 /* must be readable segment */
2198 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
)
2199 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2201 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2202 /* if not conforming code, test rights */
2203 if (dpl
< cpl
|| dpl
< rpl
)
2204 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2208 if (!(e2
& DESC_P_MASK
)) {
2209 if (seg_reg
== R_SS
)
2210 raise_exception_err(EXCP0C_STACK
, selector
& 0xfffc);
2212 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2215 /* set the access bit if not already set */
2216 if (!(e2
& DESC_A_MASK
)) {
2218 stl_kernel(ptr
+ 4, e2
);
2221 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
2222 get_seg_base(e1
, e2
),
2223 get_seg_limit(e1
, e2
),
2226 fprintf(logfile
, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2227 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
2232 /* protected mode jump */
2233 void helper_ljmp_protected(int new_cs
, target_ulong new_eip
,
2234 int next_eip_addend
)
2237 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
2238 target_ulong next_eip
;
2240 if ((new_cs
& 0xfffc) == 0)
2241 raise_exception_err(EXCP0D_GPF
, 0);
2242 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2243 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2244 cpl
= env
->hflags
& HF_CPL_MASK
;
2245 if (e2
& DESC_S_MASK
) {
2246 if (!(e2
& DESC_CS_MASK
))
2247 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2248 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2249 if (e2
& DESC_C_MASK
) {
2250 /* conforming code segment */
2252 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2254 /* non conforming code segment */
2257 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2259 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2261 if (!(e2
& DESC_P_MASK
))
2262 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2263 limit
= get_seg_limit(e1
, e2
);
2264 if (new_eip
> limit
&&
2265 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
))
2266 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2267 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2268 get_seg_base(e1
, e2
), limit
, e2
);
2271 /* jump to call or task gate */
2272 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2274 cpl
= env
->hflags
& HF_CPL_MASK
;
2275 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2277 case 1: /* 286 TSS */
2278 case 9: /* 386 TSS */
2279 case 5: /* task gate */
2280 if (dpl
< cpl
|| dpl
< rpl
)
2281 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2282 next_eip
= env
->eip
+ next_eip_addend
;
2283 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
);
2284 CC_OP
= CC_OP_EFLAGS
;
2286 case 4: /* 286 call gate */
2287 case 12: /* 386 call gate */
2288 if ((dpl
< cpl
) || (dpl
< rpl
))
2289 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2290 if (!(e2
& DESC_P_MASK
))
2291 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2293 new_eip
= (e1
& 0xffff);
2295 new_eip
|= (e2
& 0xffff0000);
2296 if (load_segment(&e1
, &e2
, gate_cs
) != 0)
2297 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2298 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2299 /* must be code segment */
2300 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
2301 (DESC_S_MASK
| DESC_CS_MASK
)))
2302 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2303 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
2304 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
)))
2305 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2306 if (!(e2
& DESC_P_MASK
))
2307 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2308 limit
= get_seg_limit(e1
, e2
);
2309 if (new_eip
> limit
)
2310 raise_exception_err(EXCP0D_GPF
, 0);
2311 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
2312 get_seg_base(e1
, e2
), limit
, e2
);
2316 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2322 /* real mode call */
2323 void helper_lcall_real(int new_cs
, target_ulong new_eip1
,
2324 int shift
, int next_eip
)
2327 uint32_t esp
, esp_mask
;
2332 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2333 ssp
= env
->segs
[R_SS
].base
;
2335 PUSHL(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2336 PUSHL(ssp
, esp
, esp_mask
, next_eip
);
2338 PUSHW(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2339 PUSHW(ssp
, esp
, esp_mask
, next_eip
);
2342 SET_ESP(esp
, esp_mask
);
2344 env
->segs
[R_CS
].selector
= new_cs
;
2345 env
->segs
[R_CS
].base
= (new_cs
<< 4);
2348 /* protected mode call */
2349 void helper_lcall_protected(int new_cs
, target_ulong new_eip
,
2350 int shift
, int next_eip_addend
)
2353 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
2354 uint32_t ss
, ss_e1
, ss_e2
, sp
, type
, ss_dpl
, sp_mask
;
2355 uint32_t val
, limit
, old_sp_mask
;
2356 target_ulong ssp
, old_ssp
, next_eip
;
2358 next_eip
= env
->eip
+ next_eip_addend
;
2360 if (loglevel
& CPU_LOG_PCALL
) {
2361 fprintf(logfile
, "lcall %04x:%08x s=%d\n",
2362 new_cs
, (uint32_t)new_eip
, shift
);
2363 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
2366 if ((new_cs
& 0xfffc) == 0)
2367 raise_exception_err(EXCP0D_GPF
, 0);
2368 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2369 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2370 cpl
= env
->hflags
& HF_CPL_MASK
;
2372 if (loglevel
& CPU_LOG_PCALL
) {
2373 fprintf(logfile
, "desc=%08x:%08x\n", e1
, e2
);
2376 if (e2
& DESC_S_MASK
) {
2377 if (!(e2
& DESC_CS_MASK
))
2378 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2379 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2380 if (e2
& DESC_C_MASK
) {
2381 /* conforming code segment */
2383 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2385 /* non conforming code segment */
2388 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2390 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2392 if (!(e2
& DESC_P_MASK
))
2393 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2395 #ifdef TARGET_X86_64
2396 /* XXX: check 16/32 bit cases in long mode */
2401 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
2402 PUSHQ(rsp
, next_eip
);
2403 /* from this point, not restartable */
2405 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2406 get_seg_base(e1
, e2
),
2407 get_seg_limit(e1
, e2
), e2
);
2413 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2414 ssp
= env
->segs
[R_SS
].base
;
2416 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2417 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2419 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2420 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2423 limit
= get_seg_limit(e1
, e2
);
2424 if (new_eip
> limit
)
2425 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2426 /* from this point, not restartable */
2427 SET_ESP(sp
, sp_mask
);
2428 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2429 get_seg_base(e1
, e2
), limit
, e2
);
2433 /* check gate type */
2434 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
2435 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2438 case 1: /* available 286 TSS */
2439 case 9: /* available 386 TSS */
2440 case 5: /* task gate */
2441 if (dpl
< cpl
|| dpl
< rpl
)
2442 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2443 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
2444 CC_OP
= CC_OP_EFLAGS
;
2446 case 4: /* 286 call gate */
2447 case 12: /* 386 call gate */
2450 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2455 if (dpl
< cpl
|| dpl
< rpl
)
2456 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2457 /* check valid bit */
2458 if (!(e2
& DESC_P_MASK
))
2459 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2460 selector
= e1
>> 16;
2461 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
2462 param_count
= e2
& 0x1f;
2463 if ((selector
& 0xfffc) == 0)
2464 raise_exception_err(EXCP0D_GPF
, 0);
2466 if (load_segment(&e1
, &e2
, selector
) != 0)
2467 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2468 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
2469 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2470 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2472 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2473 if (!(e2
& DESC_P_MASK
))
2474 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2476 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
2477 /* to inner privilege */
2478 get_ss_esp_from_tss(&ss
, &sp
, dpl
);
2480 if (loglevel
& CPU_LOG_PCALL
)
2481 fprintf(logfile
, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
"\n",
2482 ss
, sp
, param_count
, ESP
);
2484 if ((ss
& 0xfffc) == 0)
2485 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2486 if ((ss
& 3) != dpl
)
2487 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2488 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
2489 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2490 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2492 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2493 if (!(ss_e2
& DESC_S_MASK
) ||
2494 (ss_e2
& DESC_CS_MASK
) ||
2495 !(ss_e2
& DESC_W_MASK
))
2496 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2497 if (!(ss_e2
& DESC_P_MASK
))
2498 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2500 // push_size = ((param_count * 2) + 8) << shift;
2502 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2503 old_ssp
= env
->segs
[R_SS
].base
;
2505 sp_mask
= get_sp_mask(ss_e2
);
2506 ssp
= get_seg_base(ss_e1
, ss_e2
);
2508 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2509 PUSHL(ssp
, sp
, sp_mask
, ESP
);
2510 for(i
= param_count
- 1; i
>= 0; i
--) {
2511 val
= ldl_kernel(old_ssp
+ ((ESP
+ i
* 4) & old_sp_mask
));
2512 PUSHL(ssp
, sp
, sp_mask
, val
);
2515 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2516 PUSHW(ssp
, sp
, sp_mask
, ESP
);
2517 for(i
= param_count
- 1; i
>= 0; i
--) {
2518 val
= lduw_kernel(old_ssp
+ ((ESP
+ i
* 2) & old_sp_mask
));
2519 PUSHW(ssp
, sp
, sp_mask
, val
);
2524 /* to same privilege */
2526 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2527 ssp
= env
->segs
[R_SS
].base
;
2528 // push_size = (4 << shift);
2533 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2534 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2536 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2537 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2540 /* from this point, not restartable */
2543 ss
= (ss
& ~3) | dpl
;
2544 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
2546 get_seg_limit(ss_e1
, ss_e2
),
2550 selector
= (selector
& ~3) | dpl
;
2551 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
2552 get_seg_base(e1
, e2
),
2553 get_seg_limit(e1
, e2
),
2555 cpu_x86_set_cpl(env
, dpl
);
2556 SET_ESP(sp
, sp_mask
);
2560 if (kqemu_is_ok(env
)) {
2561 env
->exception_index
= -1;
2567 /* real and vm86 mode iret */
2568 void helper_iret_real(int shift
)
2570 uint32_t sp
, new_cs
, new_eip
, new_eflags
, sp_mask
;
2574 sp_mask
= 0xffff; /* XXXX: use SS segment size ? */
2576 ssp
= env
->segs
[R_SS
].base
;
2579 POPL(ssp
, sp
, sp_mask
, new_eip
);
2580 POPL(ssp
, sp
, sp_mask
, new_cs
);
2582 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2585 POPW(ssp
, sp
, sp_mask
, new_eip
);
2586 POPW(ssp
, sp
, sp_mask
, new_cs
);
2587 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2589 ESP
= (ESP
& ~sp_mask
) | (sp
& sp_mask
);
2590 load_seg_vm(R_CS
, new_cs
);
2592 if (env
->eflags
& VM_MASK
)
2593 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| RF_MASK
| NT_MASK
;
2595 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| IOPL_MASK
| RF_MASK
| NT_MASK
;
2597 eflags_mask
&= 0xffff;
2598 load_eflags(new_eflags
, eflags_mask
);
2599 env
->hflags
&= ~HF_NMI_MASK
;
/* On a privilege-lowering return, invalidate any data / non-conforming
 * code segment register whose DPL is below the new CPL, per the x86
 * protected-mode return rules.  The early return after the FS/GS null
 * check, the declarations, and the dpl-vs-cpl comparison guarding the
 * nullify call were dropped by this extraction (numbering jumps
 * 2602 -> 2607, 2611 -> 2614, 2617 -> 2619). */
2602 static inline void validate_seg(int seg_reg
, int cpl
)
2607 /* XXX: on x86_64, we do not want to nullify FS and GS because
2608 they may still contain a valid base. I would be interested to
2609 know how a real x86_64 CPU behaves */
/* Already-null FS/GS are left untouched (see XXX above). */
2610 if ((seg_reg
== R_FS
|| seg_reg
== R_GS
) &&
2611 (env
->segs
[seg_reg
].selector
& 0xfffc) == 0)
/* Read the cached descriptor flags to recover the segment's DPL. */
2614 e2
= env
->segs
[seg_reg
].flags
;
2615 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2616 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2617 /* data or non conforming code segment */
2619 cpu_x86_load_seg_cache(env
, seg_reg
, 0, 0, 0, 0);
2624 /* protected mode iret */
2625 static inline void helper_ret_protected(int shift
, int is_iret
, int addend
)
2627 uint32_t new_cs
, new_eflags
, new_ss
;
2628 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2629 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2630 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2631 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2633 #ifdef TARGET_X86_64
2638 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2640 ssp
= env
->segs
[R_SS
].base
;
2641 new_eflags
= 0; /* avoid warning */
2642 #ifdef TARGET_X86_64
2648 POPQ(sp
, new_eflags
);
2654 POPL(ssp
, sp
, sp_mask
, new_eip
);
2655 POPL(ssp
, sp
, sp_mask
, new_cs
);
2658 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2659 if (new_eflags
& VM_MASK
)
2660 goto return_to_vm86
;
2664 POPW(ssp
, sp
, sp_mask
, new_eip
);
2665 POPW(ssp
, sp
, sp_mask
, new_cs
);
2667 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2670 if (loglevel
& CPU_LOG_PCALL
) {
2671 fprintf(logfile
, "lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2672 new_cs
, new_eip
, shift
, addend
);
2673 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
2676 if ((new_cs
& 0xfffc) == 0)
2677 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2678 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2679 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2680 if (!(e2
& DESC_S_MASK
) ||
2681 !(e2
& DESC_CS_MASK
))
2682 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2683 cpl
= env
->hflags
& HF_CPL_MASK
;
2686 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2687 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2688 if (e2
& DESC_C_MASK
) {
2690 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2693 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2695 if (!(e2
& DESC_P_MASK
))
2696 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2699 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2700 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2701 /* return to same privilege level */
2702 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2703 get_seg_base(e1
, e2
),
2704 get_seg_limit(e1
, e2
),
2707 /* return to different privilege level */
2708 #ifdef TARGET_X86_64
2717 POPL(ssp
, sp
, sp_mask
, new_esp
);
2718 POPL(ssp
, sp
, sp_mask
, new_ss
);
2722 POPW(ssp
, sp
, sp_mask
, new_esp
);
2723 POPW(ssp
, sp
, sp_mask
, new_ss
);
2726 if (loglevel
& CPU_LOG_PCALL
) {
2727 fprintf(logfile
, "new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2731 if ((new_ss
& 0xfffc) == 0) {
2732 #ifdef TARGET_X86_64
2733 /* NULL ss is allowed in long mode if cpl != 3*/
2734 /* XXX: test CS64 ? */
2735 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2736 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2738 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2739 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2740 DESC_W_MASK
| DESC_A_MASK
);
2741 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed ? */
2745 raise_exception_err(EXCP0D_GPF
, 0);
2748 if ((new_ss
& 3) != rpl
)
2749 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2750 if (load_segment(&ss_e1
, &ss_e2
, new_ss
) != 0)
2751 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2752 if (!(ss_e2
& DESC_S_MASK
) ||
2753 (ss_e2
& DESC_CS_MASK
) ||
2754 !(ss_e2
& DESC_W_MASK
))
2755 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2756 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2758 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2759 if (!(ss_e2
& DESC_P_MASK
))
2760 raise_exception_err(EXCP0B_NOSEG
, new_ss
& 0xfffc);
2761 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2762 get_seg_base(ss_e1
, ss_e2
),
2763 get_seg_limit(ss_e1
, ss_e2
),
2767 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2768 get_seg_base(e1
, e2
),
2769 get_seg_limit(e1
, e2
),
2771 cpu_x86_set_cpl(env
, rpl
);
2773 #ifdef TARGET_X86_64
2774 if (env
->hflags
& HF_CS64_MASK
)
2778 sp_mask
= get_sp_mask(ss_e2
);
2780 /* validate data segments */
2781 validate_seg(R_ES
, rpl
);
2782 validate_seg(R_DS
, rpl
);
2783 validate_seg(R_FS
, rpl
);
2784 validate_seg(R_GS
, rpl
);
2788 SET_ESP(sp
, sp_mask
);
2791 /* NOTE: 'cpl' is the _old_ CPL */
2792 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2794 eflags_mask
|= IOPL_MASK
;
2795 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2797 eflags_mask
|= IF_MASK
;
2799 eflags_mask
&= 0xffff;
2800 load_eflags(new_eflags
, eflags_mask
);
2805 POPL(ssp
, sp
, sp_mask
, new_esp
);
2806 POPL(ssp
, sp
, sp_mask
, new_ss
);
2807 POPL(ssp
, sp
, sp_mask
, new_es
);
2808 POPL(ssp
, sp
, sp_mask
, new_ds
);
2809 POPL(ssp
, sp
, sp_mask
, new_fs
);
2810 POPL(ssp
, sp
, sp_mask
, new_gs
);
2812 /* modify processor state */
2813 load_eflags(new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2814 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
| VIP_MASK
);
2815 load_seg_vm(R_CS
, new_cs
& 0xffff);
2816 cpu_x86_set_cpl(env
, 3);
2817 load_seg_vm(R_SS
, new_ss
& 0xffff);
2818 load_seg_vm(R_ES
, new_es
& 0xffff);
2819 load_seg_vm(R_DS
, new_ds
& 0xffff);
2820 load_seg_vm(R_FS
, new_fs
& 0xffff);
2821 load_seg_vm(R_GS
, new_gs
& 0xffff);
2823 env
->eip
= new_eip
& 0xffff;
2827 void helper_iret_protected(int shift
, int next_eip
)
2829 int tss_selector
, type
;
2832 /* specific case for TSS */
2833 if (env
->eflags
& NT_MASK
) {
2834 #ifdef TARGET_X86_64
2835 if (env
->hflags
& HF_LMA_MASK
)
2836 raise_exception_err(EXCP0D_GPF
, 0);
2838 tss_selector
= lduw_kernel(env
->tr
.base
+ 0);
2839 if (tss_selector
& 4)
2840 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2841 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
2842 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2843 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2844 /* NOTE: we check both segment and busy TSS */
2846 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2847 switch_tss(tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
);
2849 helper_ret_protected(shift
, 1, 0);
2851 env
->hflags
&= ~HF_NMI_MASK
;
2853 if (kqemu_is_ok(env
)) {
2854 CC_OP
= CC_OP_EFLAGS
;
2855 env
->exception_index
= -1;
2861 void helper_lret_protected(int shift
, int addend
)
2863 helper_ret_protected(shift
, 0, addend
);
2865 if (kqemu_is_ok(env
)) {
2866 env
->exception_index
= -1;
2872 void helper_sysenter(void)
2874 if (env
->sysenter_cs
== 0) {
2875 raise_exception_err(EXCP0D_GPF
, 0);
2877 env
->eflags
&= ~(VM_MASK
| IF_MASK
| RF_MASK
);
2878 cpu_x86_set_cpl(env
, 0);
2879 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2881 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2883 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2884 cpu_x86_load_seg_cache(env
, R_SS
, (env
->sysenter_cs
+ 8) & 0xfffc,
2886 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2888 DESC_W_MASK
| DESC_A_MASK
);
2889 ESP
= env
->sysenter_esp
;
2890 EIP
= env
->sysenter_eip
;
2893 void helper_sysexit(void)
2897 cpl
= env
->hflags
& HF_CPL_MASK
;
2898 if (env
->sysenter_cs
== 0 || cpl
!= 0) {
2899 raise_exception_err(EXCP0D_GPF
, 0);
2901 cpu_x86_set_cpl(env
, 3);
2902 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 16) & 0xfffc) | 3,
2904 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2905 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2906 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2907 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 24) & 0xfffc) | 3,
2909 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2910 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2911 DESC_W_MASK
| DESC_A_MASK
);
2915 if (kqemu_is_ok(env
)) {
2916 env
->exception_index
= -1;
2922 #if defined(CONFIG_USER_ONLY)
2923 target_ulong
helper_read_crN(int reg
)
2928 void helper_write_crN(int reg
, target_ulong t0
)
2932 target_ulong
helper_read_crN(int reg
)
2936 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0
+ reg
, 0);
2942 val
= cpu_get_apic_tpr(env
);
2948 void helper_write_crN(int reg
, target_ulong t0
)
2950 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0
+ reg
, 0);
2953 cpu_x86_update_cr0(env
, t0
);
2956 cpu_x86_update_cr3(env
, t0
);
2959 cpu_x86_update_cr4(env
, t0
);
2962 cpu_set_apic_tpr(env
, t0
);
2972 void helper_lmsw(target_ulong t0
)
2974 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2975 if already set to one. */
2976 t0
= (env
->cr
[0] & ~0xe) | (t0
& 0xf);
2977 helper_write_crN(0, t0
);
2980 void helper_clts(void)
2982 env
->cr
[0] &= ~CR0_TS_MASK
;
2983 env
->hflags
&= ~HF_TS_MASK
;
2986 #if !defined(CONFIG_USER_ONLY)
2987 target_ulong
helper_movtl_T0_cr8(void)
2989 return cpu_get_apic_tpr(env
);
2994 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2999 void helper_invlpg(target_ulong addr
)
3001 helper_svm_check_intercept_param(SVM_EXIT_INVLPG
, 0);
3002 cpu_x86_flush_tlb(env
, addr
);
3005 void helper_rdtsc(void)
3009 if ((env
->cr
[4] & CR4_TSD_MASK
) && ((env
->hflags
& HF_CPL_MASK
) != 0)) {
3010 raise_exception(EXCP0D_GPF
);
3012 helper_svm_check_intercept_param(SVM_EXIT_RDTSC
, 0);
3014 val
= cpu_get_tsc(env
);
3015 EAX
= (uint32_t)(val
);
3016 EDX
= (uint32_t)(val
>> 32);
3019 void helper_rdpmc(void)
3021 if ((env
->cr
[4] & CR4_PCE_MASK
) && ((env
->hflags
& HF_CPL_MASK
) != 0)) {
3022 raise_exception(EXCP0D_GPF
);
3024 helper_svm_check_intercept_param(SVM_EXIT_RDPMC
, 0);
3026 /* currently unimplemented */
3027 raise_exception_err(EXCP06_ILLOP
, 0);
3030 #if defined(CONFIG_USER_ONLY)
3031 void helper_wrmsr(void)
3035 void helper_rdmsr(void)
3039 void helper_wrmsr(void)
3043 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 1);
3045 val
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
3047 switch((uint32_t)ECX
) {
3048 case MSR_IA32_SYSENTER_CS
:
3049 env
->sysenter_cs
= val
& 0xffff;
3051 case MSR_IA32_SYSENTER_ESP
:
3052 env
->sysenter_esp
= val
;
3054 case MSR_IA32_SYSENTER_EIP
:
3055 env
->sysenter_eip
= val
;
3057 case MSR_IA32_APICBASE
:
3058 cpu_set_apic_base(env
, val
);
3062 uint64_t update_mask
;
3064 if (env
->cpuid_ext2_features
& CPUID_EXT2_SYSCALL
)
3065 update_mask
|= MSR_EFER_SCE
;
3066 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
)
3067 update_mask
|= MSR_EFER_LME
;
3068 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3069 update_mask
|= MSR_EFER_FFXSR
;
3070 if (env
->cpuid_ext2_features
& CPUID_EXT2_NX
)
3071 update_mask
|= MSR_EFER_NXE
;
3072 env
->efer
= (env
->efer
& ~update_mask
) |
3073 (val
& update_mask
);
3082 case MSR_VM_HSAVE_PA
:
3083 env
->vm_hsave
= val
;
3085 #ifdef TARGET_X86_64
3096 env
->segs
[R_FS
].base
= val
;
3099 env
->segs
[R_GS
].base
= val
;
3101 case MSR_KERNELGSBASE
:
3102 env
->kernelgsbase
= val
;
3106 /* XXX: exception ? */
3111 void helper_rdmsr(void)
3115 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 0);
3117 switch((uint32_t)ECX
) {
3118 case MSR_IA32_SYSENTER_CS
:
3119 val
= env
->sysenter_cs
;
3121 case MSR_IA32_SYSENTER_ESP
:
3122 val
= env
->sysenter_esp
;
3124 case MSR_IA32_SYSENTER_EIP
:
3125 val
= env
->sysenter_eip
;
3127 case MSR_IA32_APICBASE
:
3128 val
= cpu_get_apic_base(env
);
3139 case MSR_VM_HSAVE_PA
:
3140 val
= env
->vm_hsave
;
3142 #ifdef TARGET_X86_64
3153 val
= env
->segs
[R_FS
].base
;
3156 val
= env
->segs
[R_GS
].base
;
3158 case MSR_KERNELGSBASE
:
3159 val
= env
->kernelgsbase
;
3163 case MSR_QPI_COMMBASE
:
3164 if (env
->kqemu_enabled
) {
3165 val
= kqemu_comm_base
;
3172 /* XXX: exception ? */
3176 EAX
= (uint32_t)(val
);
3177 EDX
= (uint32_t)(val
>> 32);
3181 target_ulong
helper_lsl(target_ulong selector1
)
3184 uint32_t e1
, e2
, eflags
, selector
;
3185 int rpl
, dpl
, cpl
, type
;
3187 selector
= selector1
& 0xffff;
3188 eflags
= cc_table
[CC_OP
].compute_all();
3189 if (load_segment(&e1
, &e2
, selector
) != 0)
3192 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3193 cpl
= env
->hflags
& HF_CPL_MASK
;
3194 if (e2
& DESC_S_MASK
) {
3195 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3198 if (dpl
< cpl
|| dpl
< rpl
)
3202 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3213 if (dpl
< cpl
|| dpl
< rpl
) {
3215 CC_SRC
= eflags
& ~CC_Z
;
3219 limit
= get_seg_limit(e1
, e2
);
3220 CC_SRC
= eflags
| CC_Z
;
3224 target_ulong
helper_lar(target_ulong selector1
)
3226 uint32_t e1
, e2
, eflags
, selector
;
3227 int rpl
, dpl
, cpl
, type
;
3229 selector
= selector1
& 0xffff;
3230 eflags
= cc_table
[CC_OP
].compute_all();
3231 if ((selector
& 0xfffc) == 0)
3233 if (load_segment(&e1
, &e2
, selector
) != 0)
3236 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3237 cpl
= env
->hflags
& HF_CPL_MASK
;
3238 if (e2
& DESC_S_MASK
) {
3239 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3242 if (dpl
< cpl
|| dpl
< rpl
)
3246 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3260 if (dpl
< cpl
|| dpl
< rpl
) {
3262 CC_SRC
= eflags
& ~CC_Z
;
3266 CC_SRC
= eflags
| CC_Z
;
3267 return e2
& 0x00f0ff00;
3270 void helper_verr(target_ulong selector1
)
3272 uint32_t e1
, e2
, eflags
, selector
;
3275 selector
= selector1
& 0xffff;
3276 eflags
= cc_table
[CC_OP
].compute_all();
3277 if ((selector
& 0xfffc) == 0)
3279 if (load_segment(&e1
, &e2
, selector
) != 0)
3281 if (!(e2
& DESC_S_MASK
))
3284 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3285 cpl
= env
->hflags
& HF_CPL_MASK
;
3286 if (e2
& DESC_CS_MASK
) {
3287 if (!(e2
& DESC_R_MASK
))
3289 if (!(e2
& DESC_C_MASK
)) {
3290 if (dpl
< cpl
|| dpl
< rpl
)
3294 if (dpl
< cpl
|| dpl
< rpl
) {
3296 CC_SRC
= eflags
& ~CC_Z
;
3300 CC_SRC
= eflags
| CC_Z
;
3303 void helper_verw(target_ulong selector1
)
3305 uint32_t e1
, e2
, eflags
, selector
;
3308 selector
= selector1
& 0xffff;
3309 eflags
= cc_table
[CC_OP
].compute_all();
3310 if ((selector
& 0xfffc) == 0)
3312 if (load_segment(&e1
, &e2
, selector
) != 0)
3314 if (!(e2
& DESC_S_MASK
))
3317 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3318 cpl
= env
->hflags
& HF_CPL_MASK
;
3319 if (e2
& DESC_CS_MASK
) {
3322 if (dpl
< cpl
|| dpl
< rpl
)
3324 if (!(e2
& DESC_W_MASK
)) {
3326 CC_SRC
= eflags
& ~CC_Z
;
3330 CC_SRC
= eflags
| CC_Z
;
3333 /* x87 FPU helpers */
3335 static void fpu_set_exception(int mask
)
3338 if (env
->fpus
& (~env
->fpuc
& FPUC_EM
))
3339 env
->fpus
|= FPUS_SE
| FPUS_B
;
3342 static inline CPU86_LDouble
helper_fdiv(CPU86_LDouble a
, CPU86_LDouble b
)
3345 fpu_set_exception(FPUS_ZE
);
3349 void fpu_raise_exception(void)
3351 if (env
->cr
[0] & CR0_NE_MASK
) {
3352 raise_exception(EXCP10_COPR
);
3354 #if !defined(CONFIG_USER_ONLY)
3361 void helper_flds_FT0(uint32_t val
)
3368 FT0
= float32_to_floatx(u
.f
, &env
->fp_status
);
3371 void helper_fldl_FT0(uint64_t val
)
3378 FT0
= float64_to_floatx(u
.f
, &env
->fp_status
);
3381 void helper_fildl_FT0(int32_t val
)
3383 FT0
= int32_to_floatx(val
, &env
->fp_status
);
3386 void helper_flds_ST0(uint32_t val
)
3393 new_fpstt
= (env
->fpstt
- 1) & 7;
3395 env
->fpregs
[new_fpstt
].d
= float32_to_floatx(u
.f
, &env
->fp_status
);
3396 env
->fpstt
= new_fpstt
;
3397 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3400 void helper_fldl_ST0(uint64_t val
)
3407 new_fpstt
= (env
->fpstt
- 1) & 7;
3409 env
->fpregs
[new_fpstt
].d
= float64_to_floatx(u
.f
, &env
->fp_status
);
3410 env
->fpstt
= new_fpstt
;
3411 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3414 void helper_fildl_ST0(int32_t val
)
3417 new_fpstt
= (env
->fpstt
- 1) & 7;
3418 env
->fpregs
[new_fpstt
].d
= int32_to_floatx(val
, &env
->fp_status
);
3419 env
->fpstt
= new_fpstt
;
3420 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3423 void helper_fildll_ST0(int64_t val
)
3426 new_fpstt
= (env
->fpstt
- 1) & 7;
3427 env
->fpregs
[new_fpstt
].d
= int64_to_floatx(val
, &env
->fp_status
);
3428 env
->fpstt
= new_fpstt
;
3429 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3432 uint32_t helper_fsts_ST0(void)
3438 u
.f
= floatx_to_float32(ST0
, &env
->fp_status
);
3442 uint64_t helper_fstl_ST0(void)
3448 u
.f
= floatx_to_float64(ST0
, &env
->fp_status
);
3452 int32_t helper_fist_ST0(void)
3455 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3456 if (val
!= (int16_t)val
)
3461 int32_t helper_fistl_ST0(void)
3464 val
= floatx_to_int32(ST0
, &env
->fp_status
);
3468 int64_t helper_fistll_ST0(void)
3471 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3475 int32_t helper_fistt_ST0(void)
3478 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3479 if (val
!= (int16_t)val
)
3484 int32_t helper_fisttl_ST0(void)
3487 val
= floatx_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3491 int64_t helper_fisttll_ST0(void)
3494 val
= floatx_to_int64_round_to_zero(ST0
, &env
->fp_status
);
3498 void helper_fldt_ST0(target_ulong ptr
)
3501 new_fpstt
= (env
->fpstt
- 1) & 7;
3502 env
->fpregs
[new_fpstt
].d
= helper_fldt(ptr
);
3503 env
->fpstt
= new_fpstt
;
3504 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3507 void helper_fstt_ST0(target_ulong ptr
)
3509 helper_fstt(ST0
, ptr
);
3512 void helper_fpush(void)
3517 void helper_fpop(void)
3522 void helper_fdecstp(void)
3524 env
->fpstt
= (env
->fpstt
- 1) & 7;
3525 env
->fpus
&= (~0x4700);
3528 void helper_fincstp(void)
3530 env
->fpstt
= (env
->fpstt
+ 1) & 7;
3531 env
->fpus
&= (~0x4700);
3536 void helper_ffree_STN(int st_index
)
3538 env
->fptags
[(env
->fpstt
+ st_index
) & 7] = 1;
3541 void helper_fmov_ST0_FT0(void)
3546 void helper_fmov_FT0_STN(int st_index
)
3551 void helper_fmov_ST0_STN(int st_index
)
3556 void helper_fmov_STN_ST0(int st_index
)
3561 void helper_fxchg_ST0_STN(int st_index
)
3569 /* FPU operations */
3571 static const int fcom_ccval
[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3573 void helper_fcom_ST0_FT0(void)
3577 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3578 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3582 void helper_fucom_ST0_FT0(void)
3586 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3587 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3591 static const int fcomi_ccval
[4] = {CC_C
, CC_Z
, 0, CC_Z
| CC_P
| CC_C
};
3593 void helper_fcomi_ST0_FT0(void)
3598 ret
= floatx_compare(ST0
, FT0
, &env
->fp_status
);
3599 eflags
= cc_table
[CC_OP
].compute_all();
3600 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3605 void helper_fucomi_ST0_FT0(void)
3610 ret
= floatx_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3611 eflags
= cc_table
[CC_OP
].compute_all();
3612 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3617 void helper_fadd_ST0_FT0(void)
3622 void helper_fmul_ST0_FT0(void)
3627 void helper_fsub_ST0_FT0(void)
3632 void helper_fsubr_ST0_FT0(void)
3637 void helper_fdiv_ST0_FT0(void)
3639 ST0
= helper_fdiv(ST0
, FT0
);
3642 void helper_fdivr_ST0_FT0(void)
3644 ST0
= helper_fdiv(FT0
, ST0
);
3647 /* fp operations between STN and ST0 */
3649 void helper_fadd_STN_ST0(int st_index
)
3651 ST(st_index
) += ST0
;
3654 void helper_fmul_STN_ST0(int st_index
)
3656 ST(st_index
) *= ST0
;
3659 void helper_fsub_STN_ST0(int st_index
)
3661 ST(st_index
) -= ST0
;
3664 void helper_fsubr_STN_ST0(int st_index
)
3671 void helper_fdiv_STN_ST0(int st_index
)
3675 *p
= helper_fdiv(*p
, ST0
);
3678 void helper_fdivr_STN_ST0(int st_index
)
3682 *p
= helper_fdiv(ST0
, *p
);
3685 /* misc FPU operations */
3686 void helper_fchs_ST0(void)
3688 ST0
= floatx_chs(ST0
);
3691 void helper_fabs_ST0(void)
3693 ST0
= floatx_abs(ST0
);
3696 void helper_fld1_ST0(void)
3701 void helper_fldl2t_ST0(void)
3706 void helper_fldl2e_ST0(void)
3711 void helper_fldpi_ST0(void)
3716 void helper_fldlg2_ST0(void)
3721 void helper_fldln2_ST0(void)
3726 void helper_fldz_ST0(void)
3731 void helper_fldz_FT0(void)
3736 uint32_t helper_fnstsw(void)
3738 return (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
3741 uint32_t helper_fnstcw(void)
3746 static void update_fp_status(void)
3750 /* set rounding mode */
3751 switch(env
->fpuc
& RC_MASK
) {
3754 rnd_type
= float_round_nearest_even
;
3757 rnd_type
= float_round_down
;
3760 rnd_type
= float_round_up
;
3763 rnd_type
= float_round_to_zero
;
3766 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
3768 switch((env
->fpuc
>> 8) & 3) {
3780 set_floatx80_rounding_precision(rnd_type
, &env
->fp_status
);
3784 void helper_fldcw(uint32_t val
)
3790 void helper_fclex(void)
3792 env
->fpus
&= 0x7f00;
3795 void helper_fwait(void)
3797 if (env
->fpus
& FPUS_SE
)
3798 fpu_raise_exception();
3802 void helper_fninit(void)
3819 void helper_fbld_ST0(target_ulong ptr
)
3827 for(i
= 8; i
>= 0; i
--) {
3829 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
3832 if (ldub(ptr
+ 9) & 0x80)
3838 void helper_fbst_ST0(target_ulong ptr
)
3841 target_ulong mem_ref
, mem_end
;
3844 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3846 mem_end
= mem_ref
+ 9;
3853 while (mem_ref
< mem_end
) {
3858 v
= ((v
/ 10) << 4) | (v
% 10);
3861 while (mem_ref
< mem_end
) {
3866 void helper_f2xm1(void)
3868 ST0
= pow(2.0,ST0
) - 1.0;
3871 void helper_fyl2x(void)
3873 CPU86_LDouble fptemp
;
3877 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
3881 env
->fpus
&= (~0x4700);
3886 void helper_fptan(void)
3888 CPU86_LDouble fptemp
;
3891 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3897 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3898 /* the above code is for |arg| < 2**52 only */
3902 void helper_fpatan(void)
3904 CPU86_LDouble fptemp
, fpsrcop
;
3908 ST1
= atan2(fpsrcop
,fptemp
);
3912 void helper_fxtract(void)
3914 CPU86_LDoubleU temp
;
3915 unsigned int expdif
;
3918 expdif
= EXPD(temp
) - EXPBIAS
;
3919 /*DP exponent bias*/
3926 void helper_fprem1(void)
3928 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
3929 CPU86_LDoubleU fpsrcop1
, fptemp1
;
3931 signed long long int q
;
3933 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
3934 ST0
= 0.0 / 0.0; /* NaN */
3935 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3941 fpsrcop1
.d
= fpsrcop
;
3943 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
3946 /* optimisation? taken from the AMD docs */
3947 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3948 /* ST0 is unchanged */
3953 dblq
= fpsrcop
/ fptemp
;
3954 /* round dblq towards nearest integer */
3956 ST0
= fpsrcop
- fptemp
* dblq
;
3958 /* convert dblq to q by truncating towards zero */
3960 q
= (signed long long int)(-dblq
);
3962 q
= (signed long long int)dblq
;
3964 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3965 /* (C0,C3,C1) <-- (q2,q1,q0) */
3966 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
3967 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
3968 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
3970 env
->fpus
|= 0x400; /* C2 <-- 1 */
3971 fptemp
= pow(2.0, expdif
- 50);
3972 fpsrcop
= (ST0
/ ST1
) / fptemp
;
3973 /* fpsrcop = integer obtained by chopping */
3974 fpsrcop
= (fpsrcop
< 0.0) ?
3975 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
3976 ST0
-= (ST1
* fpsrcop
* fptemp
);
3980 void helper_fprem(void)
3982 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
3983 CPU86_LDoubleU fpsrcop1
, fptemp1
;
3985 signed long long int q
;
3987 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
3988 ST0
= 0.0 / 0.0; /* NaN */
3989 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3993 fpsrcop
= (CPU86_LDouble
)ST0
;
3994 fptemp
= (CPU86_LDouble
)ST1
;
3995 fpsrcop1
.d
= fpsrcop
;
3997 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4000 /* optimisation? taken from the AMD docs */
4001 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4002 /* ST0 is unchanged */
4006 if ( expdif
< 53 ) {
4007 dblq
= fpsrcop
/*ST0*/ / fptemp
/*ST1*/;
4008 /* round dblq towards zero */
4009 dblq
= (dblq
< 0.0) ? ceil(dblq
) : floor(dblq
);
4010 ST0
= fpsrcop
/*ST0*/ - fptemp
* dblq
;
4012 /* convert dblq to q by truncating towards zero */
4014 q
= (signed long long int)(-dblq
);
4016 q
= (signed long long int)dblq
;
4018 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4019 /* (C0,C3,C1) <-- (q2,q1,q0) */
4020 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4021 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4022 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4024 int N
= 32 + (expdif
% 32); /* as per AMD docs */
4025 env
->fpus
|= 0x400; /* C2 <-- 1 */
4026 fptemp
= pow(2.0, (double)(expdif
- N
));
4027 fpsrcop
= (ST0
/ ST1
) / fptemp
;
4028 /* fpsrcop = integer obtained by chopping */
4029 fpsrcop
= (fpsrcop
< 0.0) ?
4030 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4031 ST0
-= (ST1
* fpsrcop
* fptemp
);
4035 void helper_fyl2xp1(void)
4037 CPU86_LDouble fptemp
;
4040 if ((fptemp
+1.0)>0.0) {
4041 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
4045 env
->fpus
&= (~0x4700);
4050 void helper_fsqrt(void)
4052 CPU86_LDouble fptemp
;
4056 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4062 void helper_fsincos(void)
4064 CPU86_LDouble fptemp
;
4067 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4073 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4074 /* the above code is for |arg| < 2**63 only */
4078 void helper_frndint(void)
4080 ST0
= floatx_round_to_int(ST0
, &env
->fp_status
);
4083 void helper_fscale(void)
4085 ST0
= ldexp (ST0
, (int)(ST1
));
4088 void helper_fsin(void)
4090 CPU86_LDouble fptemp
;
4093 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4097 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4098 /* the above code is for |arg| < 2**53 only */
4102 void helper_fcos(void)
4104 CPU86_LDouble fptemp
;
4107 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4111 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4112 /* the above code is for |arg5 < 2**63 only */
4116 void helper_fxam_ST0(void)
4118 CPU86_LDoubleU temp
;
4123 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4125 env
->fpus
|= 0x200; /* C1 <-- 1 */
4127 /* XXX: test fptags too */
4128 expdif
= EXPD(temp
);
4129 if (expdif
== MAXEXPD
) {
4130 #ifdef USE_X86LDOUBLE
4131 if (MANTD(temp
) == 0x8000000000000000ULL
)
4133 if (MANTD(temp
) == 0)
4135 env
->fpus
|= 0x500 /*Infinity*/;
4137 env
->fpus
|= 0x100 /*NaN*/;
4138 } else if (expdif
== 0) {
4139 if (MANTD(temp
) == 0)
4140 env
->fpus
|= 0x4000 /*Zero*/;
4142 env
->fpus
|= 0x4400 /*Denormal*/;
4148 void helper_fstenv(target_ulong ptr
, int data32
)
4150 int fpus
, fptag
, exp
, i
;
4154 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4156 for (i
=7; i
>=0; i
--) {
4158 if (env
->fptags
[i
]) {
4161 tmp
.d
= env
->fpregs
[i
].d
;
4164 if (exp
== 0 && mant
== 0) {
4167 } else if (exp
== 0 || exp
== MAXEXPD
4168 #ifdef USE_X86LDOUBLE
4169 || (mant
& (1LL << 63)) == 0
4172 /* NaNs, infinity, denormal */
4179 stl(ptr
, env
->fpuc
);
4181 stl(ptr
+ 8, fptag
);
4182 stl(ptr
+ 12, 0); /* fpip */
4183 stl(ptr
+ 16, 0); /* fpcs */
4184 stl(ptr
+ 20, 0); /* fpoo */
4185 stl(ptr
+ 24, 0); /* fpos */
4188 stw(ptr
, env
->fpuc
);
4190 stw(ptr
+ 4, fptag
);
4198 void helper_fldenv(target_ulong ptr
, int data32
)
4203 env
->fpuc
= lduw(ptr
);
4204 fpus
= lduw(ptr
+ 4);
4205 fptag
= lduw(ptr
+ 8);
4208 env
->fpuc
= lduw(ptr
);
4209 fpus
= lduw(ptr
+ 2);
4210 fptag
= lduw(ptr
+ 4);
4212 env
->fpstt
= (fpus
>> 11) & 7;
4213 env
->fpus
= fpus
& ~0x3800;
4214 for(i
= 0;i
< 8; i
++) {
4215 env
->fptags
[i
] = ((fptag
& 3) == 3);
4220 void helper_fsave(target_ulong ptr
, int data32
)
4225 helper_fstenv(ptr
, data32
);
4227 ptr
+= (14 << data32
);
4228 for(i
= 0;i
< 8; i
++) {
4230 helper_fstt(tmp
, ptr
);
4248 void helper_frstor(target_ulong ptr
, int data32
)
4253 helper_fldenv(ptr
, data32
);
4254 ptr
+= (14 << data32
);
4256 for(i
= 0;i
< 8; i
++) {
4257 tmp
= helper_fldt(ptr
);
4263 void helper_fxsave(target_ulong ptr
, int data64
)
4265 int fpus
, fptag
, i
, nb_xmm_regs
;
4269 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4271 for(i
= 0; i
< 8; i
++) {
4272 fptag
|= (env
->fptags
[i
] << i
);
4274 stw(ptr
, env
->fpuc
);
4276 stw(ptr
+ 4, fptag
^ 0xff);
4277 #ifdef TARGET_X86_64
4279 stq(ptr
+ 0x08, 0); /* rip */
4280 stq(ptr
+ 0x10, 0); /* rdp */
4284 stl(ptr
+ 0x08, 0); /* eip */
4285 stl(ptr
+ 0x0c, 0); /* sel */
4286 stl(ptr
+ 0x10, 0); /* dp */
4287 stl(ptr
+ 0x14, 0); /* sel */
4291 for(i
= 0;i
< 8; i
++) {
4293 helper_fstt(tmp
, addr
);
4297 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4298 /* XXX: finish it */
4299 stl(ptr
+ 0x18, env
->mxcsr
); /* mxcsr */
4300 stl(ptr
+ 0x1c, 0x0000ffff); /* mxcsr_mask */
4301 if (env
->hflags
& HF_CS64_MASK
)
4306 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4307 stq(addr
, env
->xmm_regs
[i
].XMM_Q(0));
4308 stq(addr
+ 8, env
->xmm_regs
[i
].XMM_Q(1));
4314 void helper_fxrstor(target_ulong ptr
, int data64
)
4316 int i
, fpus
, fptag
, nb_xmm_regs
;
4320 env
->fpuc
= lduw(ptr
);
4321 fpus
= lduw(ptr
+ 2);
4322 fptag
= lduw(ptr
+ 4);
4323 env
->fpstt
= (fpus
>> 11) & 7;
4324 env
->fpus
= fpus
& ~0x3800;
4326 for(i
= 0;i
< 8; i
++) {
4327 env
->fptags
[i
] = ((fptag
>> i
) & 1);
4331 for(i
= 0;i
< 8; i
++) {
4332 tmp
= helper_fldt(addr
);
4337 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4338 /* XXX: finish it */
4339 env
->mxcsr
= ldl(ptr
+ 0x18);
4341 if (env
->hflags
& HF_CS64_MASK
)
4346 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4347 env
->xmm_regs
[i
].XMM_Q(0) = ldq(addr
);
4348 env
->xmm_regs
[i
].XMM_Q(1) = ldq(addr
+ 8);
4354 #ifndef USE_X86LDOUBLE
4356 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4358 CPU86_LDoubleU temp
;
4363 *pmant
= (MANTD(temp
) << 11) | (1LL << 63);
4364 /* exponent + sign */
4365 e
= EXPD(temp
) - EXPBIAS
+ 16383;
4366 e
|= SIGND(temp
) >> 16;
4370 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4372 CPU86_LDoubleU temp
;
4376 /* XXX: handle overflow ? */
4377 e
= (upper
& 0x7fff) - 16383 + EXPBIAS
; /* exponent */
4378 e
|= (upper
>> 4) & 0x800; /* sign */
4379 ll
= (mant
>> 11) & ((1LL << 52) - 1);
4381 temp
.l
.upper
= (e
<< 20) | (ll
>> 32);
4384 temp
.ll
= ll
| ((uint64_t)e
<< 52);
4391 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
4393 CPU86_LDoubleU temp
;
4396 *pmant
= temp
.l
.lower
;
4397 *pexp
= temp
.l
.upper
;
4400 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4402 CPU86_LDoubleU temp
;
4404 temp
.l
.upper
= upper
;
4405 temp
.l
.lower
= mant
;
4410 #ifdef TARGET_X86_64
4412 //#define DEBUG_MULDIV
4414 static void add128(uint64_t *plow
, uint64_t *phigh
, uint64_t a
, uint64_t b
)
4423 static void neg128(uint64_t *plow
, uint64_t *phigh
)
4427 add128(plow
, phigh
, 1, 0);
4430 /* return TRUE if overflow */
4431 static int div64(uint64_t *plow
, uint64_t *phigh
, uint64_t b
)
4433 uint64_t q
, r
, a1
, a0
;
4446 /* XXX: use a better algorithm */
4447 for(i
= 0; i
< 64; i
++) {
4449 a1
= (a1
<< 1) | (a0
>> 63);
4450 if (ab
|| a1
>= b
) {
4456 a0
= (a0
<< 1) | qb
;
4458 #if defined(DEBUG_MULDIV)
4459 printf("div: 0x%016" PRIx64
"%016" PRIx64
" / 0x%016" PRIx64
": q=0x%016" PRIx64
" r=0x%016" PRIx64
"\n",
4460 *phigh
, *plow
, b
, a0
, a1
);
4468 /* return TRUE if overflow */
4469 static int idiv64(uint64_t *plow
, uint64_t *phigh
, int64_t b
)
4472 sa
= ((int64_t)*phigh
< 0);
4474 neg128(plow
, phigh
);
4478 if (div64(plow
, phigh
, b
) != 0)
4481 if (*plow
> (1ULL << 63))
4485 if (*plow
>= (1ULL << 63))
4493 void helper_mulq_EAX_T0(target_ulong t0
)
4497 mulu64(&r0
, &r1
, EAX
, t0
);
4504 void helper_imulq_EAX_T0(target_ulong t0
)
4508 muls64(&r0
, &r1
, EAX
, t0
);
4512 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4515 target_ulong
helper_imulq_T0_T1(target_ulong t0
, target_ulong t1
)
4519 muls64(&r0
, &r1
, t0
, t1
);
4521 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4525 void helper_divq_EAX(target_ulong t0
)
4529 raise_exception(EXCP00_DIVZ
);
4533 if (div64(&r0
, &r1
, t0
))
4534 raise_exception(EXCP00_DIVZ
);
4539 void helper_idivq_EAX(target_ulong t0
)
4543 raise_exception(EXCP00_DIVZ
);
4547 if (idiv64(&r0
, &r1
, t0
))
4548 raise_exception(EXCP00_DIVZ
);
4554 void helper_hlt(void)
4556 helper_svm_check_intercept_param(SVM_EXIT_HLT
, 0);
4558 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
; /* needed if sti is just before */
4560 env
->exception_index
= EXCP_HLT
;
4564 void helper_monitor(target_ulong ptr
)
4566 if ((uint32_t)ECX
!= 0)
4567 raise_exception(EXCP0D_GPF
);
4568 /* XXX: store address ? */
4569 helper_svm_check_intercept_param(SVM_EXIT_MONITOR
, 0);
4572 void helper_mwait(void)
4574 if ((uint32_t)ECX
!= 0)
4575 raise_exception(EXCP0D_GPF
);
4576 helper_svm_check_intercept_param(SVM_EXIT_MWAIT
, 0);
4577 /* XXX: not complete but not completely erroneous */
4578 if (env
->cpu_index
!= 0 || env
->next_cpu
!= NULL
) {
4579 /* more than one CPU: do not sleep because another CPU may
4586 void helper_debug(void)
4588 env
->exception_index
= EXCP_DEBUG
;
4592 void helper_raise_interrupt(int intno
, int next_eip_addend
)
4594 raise_interrupt(intno
, 1, 0, next_eip_addend
);
4597 void helper_raise_exception(int exception_index
)
4599 raise_exception(exception_index
);
4602 void helper_cli(void)
4604 env
->eflags
&= ~IF_MASK
;
4607 void helper_sti(void)
4609 env
->eflags
|= IF_MASK
;
4613 /* vm86plus instructions */
4614 void helper_cli_vm(void)
4616 env
->eflags
&= ~VIF_MASK
;
4619 void helper_sti_vm(void)
4621 env
->eflags
|= VIF_MASK
;
4622 if (env
->eflags
& VIP_MASK
) {
4623 raise_exception(EXCP0D_GPF
);
4628 void helper_set_inhibit_irq(void)
4630 env
->hflags
|= HF_INHIBIT_IRQ_MASK
;
4633 void helper_reset_inhibit_irq(void)
4635 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
4638 void helper_boundw(target_ulong a0
, int v
)
4642 high
= ldsw(a0
+ 2);
4644 if (v
< low
|| v
> high
) {
4645 raise_exception(EXCP05_BOUND
);
4650 void helper_boundl(target_ulong a0
, int v
)
4655 if (v
< low
|| v
> high
) {
4656 raise_exception(EXCP05_BOUND
);
4661 static float approx_rsqrt(float a
)
4663 return 1.0 / sqrt(a
);
4666 static float approx_rcp(float a
)
4671 #if !defined(CONFIG_USER_ONLY)
4673 #define MMUSUFFIX _mmu
4676 #include "softmmu_template.h"
4679 #include "softmmu_template.h"
4682 #include "softmmu_template.h"
4685 #include "softmmu_template.h"
4689 /* try to fill the TLB and return an exception if error. If retaddr is
4690 NULL, it means that the function was called in C code (i.e. not
4691 from generated code or from helper.c) */
4692 /* XXX: fix it to restore all registers */
4693 void tlb_fill(target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
4695 TranslationBlock
*tb
;
4698 CPUX86State
*saved_env
;
4700 /* XXX: hack to restore env in all cases, even if not called from
4703 env
= cpu_single_env
;
4705 ret
= cpu_x86_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
4708 /* now we have a real cpu fault */
4709 pc
= (unsigned long)retaddr
;
4710 tb
= tb_find_pc(pc
);
4712 /* the PC is inside the translated code. It means that we have
4713 a virtual CPU fault */
4714 cpu_restore_state(tb
, env
, pc
, NULL
);
4717 raise_exception_err(env
->exception_index
, env
->error_code
);
4723 /* Secure Virtual Machine helpers */
4725 #if defined(CONFIG_USER_ONLY)
4727 void helper_vmrun(void)
4730 void helper_vmmcall(void)
4733 void helper_vmload(void)
4736 void helper_vmsave(void)
4739 void helper_stgi(void)
4742 void helper_clgi(void)
4745 void helper_skinit(void)
4748 void helper_invlpga(void)
4751 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
4754 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
4758 void helper_svm_check_io(uint32_t port
, uint32_t param
,
4759 uint32_t next_eip_addend
)
4764 static inline void svm_save_seg(target_phys_addr_t addr
,
4765 const SegmentCache
*sc
)
4767 stw_phys(addr
+ offsetof(struct vmcb_seg
, selector
),
4769 stq_phys(addr
+ offsetof(struct vmcb_seg
, base
),
4771 stl_phys(addr
+ offsetof(struct vmcb_seg
, limit
),
4773 stw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
),
4774 (sc
->flags
>> 8) | ((sc
->flags
>> 12) & 0x0f00));
4777 static inline void svm_load_seg(target_phys_addr_t addr
, SegmentCache
*sc
)
4781 sc
->selector
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, selector
));
4782 sc
->base
= ldq_phys(addr
+ offsetof(struct vmcb_seg
, base
));
4783 sc
->limit
= ldl_phys(addr
+ offsetof(struct vmcb_seg
, limit
));
4784 flags
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
));
4785 sc
->flags
= ((flags
& 0xff) << 8) | ((flags
& 0x0f00) << 12);
4788 static inline void svm_load_seg_cache(target_phys_addr_t addr
,
4789 CPUState
*env
, int seg_reg
)
4791 SegmentCache sc1
, *sc
= &sc1
;
4792 svm_load_seg(addr
, sc
);
4793 cpu_x86_load_seg_cache(env
, seg_reg
, sc
->selector
,
4794 sc
->base
, sc
->limit
, sc
->flags
);
4797 void helper_vmrun(void)
4803 helper_svm_check_intercept_param(SVM_EXIT_VMRUN
, 0);
4806 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4807 fprintf(logfile
,"vmrun! " TARGET_FMT_lx
"\n", addr
);
4809 env
->vm_vmcb
= addr
;
4811 /* save the current CPU state in the hsave page */
4812 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
4813 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4815 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4816 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4818 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4819 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4820 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4821 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4822 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr8
), env
->cr
[8]);
4823 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4824 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4826 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4827 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4829 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
4831 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
4833 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
4835 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
4838 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
), EIP
);
4839 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4840 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4842 /* load the interception bitmaps so we do not need to access the
4844 env
->intercept
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
));
4845 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
4846 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
4847 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_read
));
4848 env
->intercept_dr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_write
));
4849 env
->intercept_exceptions
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_exceptions
));
4851 /* enable intercepts */
4852 env
->hflags
|= HF_SVMI_MASK
;
4854 env
->gdt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
));
4855 env
->gdt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
4857 env
->idt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
));
4858 env
->idt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
));
4860 /* clear exit_info_2 so we behave like the real hardware */
4861 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
4863 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
)));
4864 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
)));
4865 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
)));
4866 env
->cr
[2] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
4867 int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
4868 if (int_ctl
& V_INTR_MASKING_MASK
) {
4869 env
->cr
[8] = int_ctl
& V_TPR_MASK
;
4870 cpu_set_apic_tpr(env
, env
->cr
[8]);
4871 if (env
->eflags
& IF_MASK
)
4872 env
->hflags
|= HF_HIF_MASK
;
4875 #ifdef TARGET_X86_64
4876 env
->efer
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
));
4877 env
->hflags
&= ~HF_LMA_MASK
;
4878 if (env
->efer
& MSR_EFER_LMA
)
4879 env
->hflags
|= HF_LMA_MASK
;
4882 load_eflags(ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
)),
4883 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
4884 CC_OP
= CC_OP_EFLAGS
;
4886 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
4888 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
4890 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
4892 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
4895 EIP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
4897 ESP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
4898 EAX
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
4899 env
->dr
[7] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
4900 env
->dr
[6] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
4901 cpu_x86_set_cpl(env
, ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
)));
4903 /* FIXME: guest state consistency checks */
4905 switch(ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
4906 case TLB_CONTROL_DO_NOTHING
:
4908 case TLB_CONTROL_FLUSH_ALL_ASID
:
4909 /* FIXME: this is not 100% correct but should work for now */
4916 /* maybe we need to inject an event */
4917 event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
4918 if (event_inj
& SVM_EVTINJ_VALID
) {
4919 uint8_t vector
= event_inj
& SVM_EVTINJ_VEC_MASK
;
4920 uint16_t valid_err
= event_inj
& SVM_EVTINJ_VALID_ERR
;
4921 uint32_t event_inj_err
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
));
4922 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), event_inj
& ~SVM_EVTINJ_VALID
);
4924 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4925 fprintf(logfile
, "Injecting(%#hx): ", valid_err
);
4926 /* FIXME: need to implement valid_err */
4927 switch (event_inj
& SVM_EVTINJ_TYPE_MASK
) {
4928 case SVM_EVTINJ_TYPE_INTR
:
4929 env
->exception_index
= vector
;
4930 env
->error_code
= event_inj_err
;
4931 env
->exception_is_int
= 0;
4932 env
->exception_next_eip
= -1;
4933 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4934 fprintf(logfile
, "INTR");
4936 case SVM_EVTINJ_TYPE_NMI
:
4937 env
->exception_index
= vector
;
4938 env
->error_code
= event_inj_err
;
4939 env
->exception_is_int
= 0;
4940 env
->exception_next_eip
= EIP
;
4941 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4942 fprintf(logfile
, "NMI");
4944 case SVM_EVTINJ_TYPE_EXEPT
:
4945 env
->exception_index
= vector
;
4946 env
->error_code
= event_inj_err
;
4947 env
->exception_is_int
= 0;
4948 env
->exception_next_eip
= -1;
4949 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4950 fprintf(logfile
, "EXEPT");
4952 case SVM_EVTINJ_TYPE_SOFT
:
4953 env
->exception_index
= vector
;
4954 env
->error_code
= event_inj_err
;
4955 env
->exception_is_int
= 1;
4956 env
->exception_next_eip
= EIP
;
4957 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4958 fprintf(logfile
, "SOFT");
4961 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4962 fprintf(logfile
, " %#x %#x\n", env
->exception_index
, env
->error_code
);
4964 if ((int_ctl
& V_IRQ_MASK
) ||
4965 (env
->intercept
& (1ULL << (SVM_EXIT_INTR
- SVM_EXIT_INTR
)))) {
4966 env
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
4972 void helper_vmmcall(void)
4974 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL
, 0);
4975 raise_exception(EXCP06_ILLOP
);
4978 void helper_vmload(void)
4981 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD
, 0);
4983 /* XXX: invalid in 32 bit */
4985 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4986 fprintf(logfile
,"vmload! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
4987 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
4988 env
->segs
[R_FS
].base
);
4990 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.fs
),
4992 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.gs
),
4994 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
4996 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
4999 #ifdef TARGET_X86_64
5000 env
->kernelgsbase
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
));
5001 env
->lstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
));
5002 env
->cstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
));
5003 env
->fmask
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
));
5005 env
->star
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.star
));
5006 env
->sysenter_cs
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
));
5007 env
->sysenter_esp
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
));
5008 env
->sysenter_eip
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
));
5011 void helper_vmsave(void)
5014 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE
, 0);
5016 if (loglevel
& CPU_LOG_TB_IN_ASM
)
5017 fprintf(logfile
,"vmsave! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
5018 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
5019 env
->segs
[R_FS
].base
);
5021 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.fs
),
5023 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.gs
),
5025 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
5027 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
5030 #ifdef TARGET_X86_64
5031 stq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
), env
->kernelgsbase
);
5032 stq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
), env
->lstar
);
5033 stq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
), env
->cstar
);
5034 stq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
), env
->fmask
);
5036 stq_phys(addr
+ offsetof(struct vmcb
, save
.star
), env
->star
);
5037 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
), env
->sysenter_cs
);
5038 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
), env
->sysenter_esp
);
5039 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
), env
->sysenter_eip
);
5042 void helper_stgi(void)
5044 helper_svm_check_intercept_param(SVM_EXIT_STGI
, 0);
5045 env
->hflags
|= HF_GIF_MASK
;
5048 void helper_clgi(void)
5050 helper_svm_check_intercept_param(SVM_EXIT_CLGI
, 0);
5051 env
->hflags
&= ~HF_GIF_MASK
;
5054 void helper_skinit(void)
5056 helper_svm_check_intercept_param(SVM_EXIT_SKINIT
, 0);
5057 /* XXX: not implemented */
5058 if (loglevel
& CPU_LOG_TB_IN_ASM
)
5059 fprintf(logfile
,"skinit!\n");
5060 raise_exception(EXCP06_ILLOP
);
5063 void helper_invlpga(void)
5065 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA
, 0);
5069 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
5071 if (likely(!(env
->hflags
& HF_SVMI_MASK
)))
5074 case SVM_EXIT_READ_CR0
... SVM_EXIT_READ_CR0
+ 8:
5075 if (env
->intercept_cr_read
& (1 << (type
- SVM_EXIT_READ_CR0
))) {
5076 helper_vmexit(type
, param
);
5079 case SVM_EXIT_WRITE_CR0
... SVM_EXIT_WRITE_CR0
+ 8:
5080 if (env
->intercept_cr_write
& (1 << (type
- SVM_EXIT_WRITE_CR0
))) {
5081 helper_vmexit(type
, param
);
5084 case SVM_EXIT_READ_DR0
... SVM_EXIT_READ_DR0
+ 7:
5085 if (env
->intercept_dr_read
& (1 << (type
- SVM_EXIT_READ_DR0
))) {
5086 helper_vmexit(type
, param
);
5089 case SVM_EXIT_WRITE_DR0
... SVM_EXIT_WRITE_DR0
+ 7:
5090 if (env
->intercept_dr_write
& (1 << (type
- SVM_EXIT_WRITE_DR0
))) {
5091 helper_vmexit(type
, param
);
5094 case SVM_EXIT_EXCP_BASE
... SVM_EXIT_EXCP_BASE
+ 31:
5095 if (env
->intercept_exceptions
& (1 << (type
- SVM_EXIT_EXCP_BASE
))) {
5096 helper_vmexit(type
, param
);
5100 if (env
->intercept
& (1ULL << (SVM_EXIT_MSR
- SVM_EXIT_INTR
))) {
5101 /* FIXME: this should be read in at vmrun (faster this way?) */
5102 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.msrpm_base_pa
));
5104 switch((uint32_t)ECX
) {
5109 case 0xc0000000 ... 0xc0001fff:
5110 t0
= (8192 + ECX
- 0xc0000000) * 2;
5114 case 0xc0010000 ... 0xc0011fff:
5115 t0
= (16384 + ECX
- 0xc0010000) * 2;
5120 helper_vmexit(type
, param
);
5125 if (ldub_phys(addr
+ t1
) & ((1 << param
) << t0
))
5126 helper_vmexit(type
, param
);
5130 if (env
->intercept
& (1ULL << (type
- SVM_EXIT_INTR
))) {
5131 helper_vmexit(type
, param
);
5137 void helper_svm_check_io(uint32_t port
, uint32_t param
,
5138 uint32_t next_eip_addend
)
5140 if (env
->intercept
& (1ULL << (SVM_EXIT_IOIO
- SVM_EXIT_INTR
))) {
5141 /* FIXME: this should be read in at vmrun (faster this way?) */
5142 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.iopm_base_pa
));
5143 uint16_t mask
= (1 << ((param
>> 4) & 7)) - 1;
5144 if(lduw_phys(addr
+ port
/ 8) & (mask
<< (port
& 7))) {
5146 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
),
5147 env
->eip
+ next_eip_addend
);
5148 helper_vmexit(SVM_EXIT_IOIO
, param
| (port
<< 16));
5153 /* Note: currently only 32 bits of exit_code are used */
5154 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
5158 if (loglevel
& CPU_LOG_TB_IN_ASM
)
5159 fprintf(logfile
,"vmexit(%08x, %016" PRIx64
", %016" PRIx64
", " TARGET_FMT_lx
")!\n",
5160 exit_code
, exit_info_1
,
5161 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
)),
5164 if(env
->hflags
& HF_INHIBIT_IRQ_MASK
) {
5165 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), SVM_INTERRUPT_SHADOW_MASK
);
5166 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
5168 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), 0);
5171 /* Save the VM state in the vmcb */
5172 svm_save_seg(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
5174 svm_save_seg(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
5176 svm_save_seg(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
5178 svm_save_seg(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
5181 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
5182 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
5184 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
5185 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
5187 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
5188 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
5189 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
5190 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
5191 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
5193 if ((int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
))) & V_INTR_MASKING_MASK
) {
5194 int_ctl
&= ~V_TPR_MASK
;
5195 int_ctl
|= env
->cr
[8] & V_TPR_MASK
;
5196 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
), int_ctl
);
5199 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
5200 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
), env
->eip
);
5201 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
5202 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
), EAX
);
5203 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
5204 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
5205 stb_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
), env
->hflags
& HF_CPL_MASK
);
5207 /* Reload the host state from vm_hsave */
5208 env
->hflags
&= ~HF_HIF_MASK
;
5209 env
->hflags
&= ~HF_SVMI_MASK
;
5211 env
->intercept_exceptions
= 0;
5212 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
5214 env
->gdt
.base
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
));
5215 env
->gdt
.limit
= ldl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
5217 env
->idt
.base
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
));
5218 env
->idt
.limit
= ldl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
));
5220 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
)) | CR0_PE_MASK
);
5221 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
)));
5222 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
)));
5223 if (int_ctl
& V_INTR_MASKING_MASK
) {
5224 env
->cr
[8] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr8
));
5225 cpu_set_apic_tpr(env
, env
->cr
[8]);
5227 /* we need to set the efer after the crs so the hidden flags get set properly */
5228 #ifdef TARGET_X86_64
5229 env
->efer
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
));
5230 env
->hflags
&= ~HF_LMA_MASK
;
5231 if (env
->efer
& MSR_EFER_LMA
)
5232 env
->hflags
|= HF_LMA_MASK
;
5233 /* XXX: should also emulate the VM_CR MSR */
5234 env
->hflags
&= ~HF_SVME_MASK
;
5235 if (env
->cpuid_ext3_features
& CPUID_EXT3_SVM
) {
5236 if (env
->efer
& MSR_EFER_SVME
)
5237 env
->hflags
|= HF_SVME_MASK
;
5239 env
->efer
&= ~MSR_EFER_SVME
;
5244 load_eflags(ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
)),
5245 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
5246 CC_OP
= CC_OP_EFLAGS
;
5248 svm_load_seg_cache(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
5250 svm_load_seg_cache(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
5252 svm_load_seg_cache(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
5254 svm_load_seg_cache(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
5257 EIP
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
));
5258 ESP
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
));
5259 EAX
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
));
5261 env
->dr
[6] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
));
5262 env
->dr
[7] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
));
5265 cpu_x86_set_cpl(env
, 0);
5266 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_code
), exit_code
);
5267 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_1
), exit_info_1
);
5270 /* FIXME: Resets the current ASID register to zero (host ASID). */
5272 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5274 /* Clears the TSC_OFFSET inside the processor. */
5276 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5277 from the page table indicated the host's CR3. If the PDPEs contain
5278 illegal state, the processor causes a shutdown. */
5280 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5281 env
->cr
[0] |= CR0_PE_MASK
;
5282 env
->eflags
&= ~VM_MASK
;
5284 /* Disables all breakpoints in the host DR7 register. */
5286 /* Checks the reloaded host state for consistency. */
5288 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5289 host's code segment or non-canonical (in the case of long mode), a
5290 #GP fault is delivered inside the host.) */
5292 /* remove any pending exception */
5293 env
->exception_index
= -1;
5294 env
->error_code
= 0;
5295 env
->old_exception
= -1;
5303 /* XXX: optimize by storing fptt and fptags in the static cpu state */
5304 void helper_enter_mmx(void)
5307 *(uint32_t *)(env
->fptags
) = 0;
5308 *(uint32_t *)(env
->fptags
+ 4) = 0;
5311 void helper_emms(void)
5313 /* set to empty state */
5314 *(uint32_t *)(env
->fptags
) = 0x01010101;
5315 *(uint32_t *)(env
->fptags
+ 4) = 0x01010101;
5319 void helper_movq(uint64_t *d
, uint64_t *s
)
5325 #include "ops_sse.h"
5328 #include "ops_sse.h"
5331 #include "helper_template.h"
5335 #include "helper_template.h"
5339 #include "helper_template.h"
5342 #ifdef TARGET_X86_64
5345 #include "helper_template.h"
5350 /* bit operations */
5351 target_ulong
helper_bsf(target_ulong t0
)
5358 while ((res
& 1) == 0) {
5365 target_ulong
helper_bsr(target_ulong t0
)
5368 target_ulong res
, mask
;
5371 count
= TARGET_LONG_BITS
- 1;
5372 mask
= (target_ulong
)1 << (TARGET_LONG_BITS
- 1);
5373 while ((res
& mask
) == 0) {
5381 static int compute_all_eflags(void)
5386 static int compute_c_eflags(void)
5388 return CC_SRC
& CC_C
;
5391 CCTable cc_table
[CC_OP_NB
] = {
5392 [CC_OP_DYNAMIC
] = { /* should never happen */ },
5394 [CC_OP_EFLAGS
] = { compute_all_eflags
, compute_c_eflags
},
5396 [CC_OP_MULB
] = { compute_all_mulb
, compute_c_mull
},
5397 [CC_OP_MULW
] = { compute_all_mulw
, compute_c_mull
},
5398 [CC_OP_MULL
] = { compute_all_mull
, compute_c_mull
},
5400 [CC_OP_ADDB
] = { compute_all_addb
, compute_c_addb
},
5401 [CC_OP_ADDW
] = { compute_all_addw
, compute_c_addw
},
5402 [CC_OP_ADDL
] = { compute_all_addl
, compute_c_addl
},
5404 [CC_OP_ADCB
] = { compute_all_adcb
, compute_c_adcb
},
5405 [CC_OP_ADCW
] = { compute_all_adcw
, compute_c_adcw
},
5406 [CC_OP_ADCL
] = { compute_all_adcl
, compute_c_adcl
},
5408 [CC_OP_SUBB
] = { compute_all_subb
, compute_c_subb
},
5409 [CC_OP_SUBW
] = { compute_all_subw
, compute_c_subw
},
5410 [CC_OP_SUBL
] = { compute_all_subl
, compute_c_subl
},
5412 [CC_OP_SBBB
] = { compute_all_sbbb
, compute_c_sbbb
},
5413 [CC_OP_SBBW
] = { compute_all_sbbw
, compute_c_sbbw
},
5414 [CC_OP_SBBL
] = { compute_all_sbbl
, compute_c_sbbl
},
5416 [CC_OP_LOGICB
] = { compute_all_logicb
, compute_c_logicb
},
5417 [CC_OP_LOGICW
] = { compute_all_logicw
, compute_c_logicw
},
5418 [CC_OP_LOGICL
] = { compute_all_logicl
, compute_c_logicl
},
5420 [CC_OP_INCB
] = { compute_all_incb
, compute_c_incl
},
5421 [CC_OP_INCW
] = { compute_all_incw
, compute_c_incl
},
5422 [CC_OP_INCL
] = { compute_all_incl
, compute_c_incl
},
5424 [CC_OP_DECB
] = { compute_all_decb
, compute_c_incl
},
5425 [CC_OP_DECW
] = { compute_all_decw
, compute_c_incl
},
5426 [CC_OP_DECL
] = { compute_all_decl
, compute_c_incl
},
5428 [CC_OP_SHLB
] = { compute_all_shlb
, compute_c_shlb
},
5429 [CC_OP_SHLW
] = { compute_all_shlw
, compute_c_shlw
},
5430 [CC_OP_SHLL
] = { compute_all_shll
, compute_c_shll
},
5432 [CC_OP_SARB
] = { compute_all_sarb
, compute_c_sarl
},
5433 [CC_OP_SARW
] = { compute_all_sarw
, compute_c_sarl
},
5434 [CC_OP_SARL
] = { compute_all_sarl
, compute_c_sarl
},
5436 #ifdef TARGET_X86_64
5437 [CC_OP_MULQ
] = { compute_all_mulq
, compute_c_mull
},
5439 [CC_OP_ADDQ
] = { compute_all_addq
, compute_c_addq
},
5441 [CC_OP_ADCQ
] = { compute_all_adcq
, compute_c_adcq
},
5443 [CC_OP_SUBQ
] = { compute_all_subq
, compute_c_subq
},
5445 [CC_OP_SBBQ
] = { compute_all_sbbq
, compute_c_sbbq
},
5447 [CC_OP_LOGICQ
] = { compute_all_logicq
, compute_c_logicq
},
5449 [CC_OP_INCQ
] = { compute_all_incq
, compute_c_incl
},
5451 [CC_OP_DECQ
] = { compute_all_decq
, compute_c_incl
},
5453 [CC_OP_SHLQ
] = { compute_all_shlq
, compute_c_shlq
},
5455 [CC_OP_SARQ
] = { compute_all_sarq
, compute_c_sarl
},