 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define CPU_NO_GLOBAL_REGS
#include "host-utils.h"

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
        log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif
#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
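/* Note: the tables below give the rotate count actually used by
   RCL/RCR: rotating a 16-bit operand through CF cycles over 17 bits
   and an 8-bit operand over 9 bits, so the raw count is reduced
   modulo 17 (rclw_table) and modulo 9 (rclb_table). */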
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
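/* Note: the task switch below follows the hardware sequence: validate
   the new TSS descriptor, read the whole incoming context, write the
   outgoing context into the current TSS, update the busy bits and the
   back link, then load TR, CR3 and the general registers; the final
   segment loads may still fault inside the new task. */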
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
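/* Note: check_io() below implements the TSS I/O permission bitmap:
   the 16-bit word at offset 0x66 of a 32-bit TSS gives the bitmap
   offset, and every bit covering a byte of the access must be clear
   for the I/O to be allowed. */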
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

static int exception_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
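/* Note: the push macros above update a local copy of the stack
   pointer and the interrupt/call code only commits it with SET_ESP
   once the whole frame has been written, so a fault in the middle of
   the sequence leaves ESP unchanged and the instruction restartable. */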
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
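/* Note: 64-bit interrupt delivery below always pushes SS:RSP, aligns
   the new stack on 16 bytes, and may switch to one of the seven
   Interrupt Stack Table entries read from the 64-bit TSS via
   get_rsp_from_tss(). */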
#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif /* TARGET_X86_64 */
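/* Note: for SYSCALL/SYSRET the CS/SS selector pair comes from
   MSR_STAR (bits 47:32 for syscall, 63:48 for sysret), the 64-bit
   entry point from LSTAR (CSTAR for compatibility mode) and the
   RFLAGS bits to clear from FMASK; RCX and R11 carry the return RIP
   and RFLAGS. */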
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif
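/* Note: real-mode vectoring below uses the flat interrupt vector
   table at the IDT base: 4 bytes per vector, offset word first, then
   segment word; only FLAGS, CS and IP are pushed. */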
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
#if !defined(CONFIG_USER_ONLY)
static void handle_even_inj(int intno, int is_int, int error_code,
                int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);
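/* Note: vectors 0 (#DE) and 10-13 (#TS, #NP, #SS, #GP) form the
   "contributory" class used below; two contributory faults, or a page
   fault followed by a contributory fault or another page fault,
   escalate to a double fault, and a fault during #DF delivery is a
   triple fault. */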
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called, if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

void raise_exception_env(int exception_index, CPUState *nenv)
{
    env = nenv;
    raise_exception(exception_index);
}
/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */
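/* Note: the div/idiv helpers below raise #DE (EXCP00_DIVZ) both for a
   zero divisor and for a quotient that overflows the destination
   register, matching hardware; the arithmetic flags are left
   undefined. */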
/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
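/* Note: for the BCD helpers below, AAM divides AL by the immediate
   base (10 in the common encoding) into AH:AL and AAD folds AH * base
   back into AL; the adjust helpers recompute ZF/PF/SF by hand from
   the 8-bit result. */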
/* XXX: exception */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}
void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}
1794 eflags
= helper_cc_compute_all(CC_OP
);
1797 ah
= (EAX
>> 8) & 0xff;
1800 if (((al
& 0x0f) > 9 ) || af
) {
1801 al
= (al
- 6) & 0x0f;
1802 ah
= (ah
- 1 - icarry
) & 0xff;
1803 eflags
|= CC_C
| CC_A
;
1805 eflags
&= ~(CC_C
| CC_A
);
1808 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}
void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}
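/* Note: helper_enter_level() below implements the nesting-level part
   of ENTER: level-1 frame pointers are copied down from the old frame
   before the new frame pointer (t1) is pushed. */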
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}
#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
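/* Note: far transfers below come in two flavours: a direct jump/call
   to a code segment, and a transfer through a gate descriptor (task
   gate, TSS or call gate), where privilege is checked against the
   gate DPL before the target selector is followed. */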
/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
2307 /* protected mode call */
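/* A far call through a call gate takes the target CS:EIP and the
   parameter count from the gate descriptor; a call into a more
   privileged segment additionally switches stacks. A direct call to
   a code segment follows the same privilege rules as a far jump. */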
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
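
        /* Calling a non-conforming segment with DPL < CPL enters an
           inner ring: the new SS:ESP is fetched from the TSS for that
           ring and param_count words of call arguments are copied from
           the old stack to the new one before the caller's CS:EIP is
           pushed. */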
        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                      ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}

/* protected mode iret */
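/* Common tail of lret and iret: pop CS:EIP (plus EFLAGS for iret),
   revalidate the target code segment, and on a return to an outer
   ring also pop SS:ESP and nullify data segment registers whose DPL
   is now too privileged for the new CPL. */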
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}

void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}
#if defined(CONFIG_USER_ONLY)
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
#else
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}

void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
#endif
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}

void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}

void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}

void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}

void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
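
/* Note for the MSR helpers below: a write to MSR_EFER only updates
   bits whose features are advertised in CPUID (collected in
   update_mask); all other EFER bits keep their current value. */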
#if defined(CONFIG_USER_ONLY)
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
#else
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0))
            env->mcg_ctl = val;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0))
                env->mce_banks[offset] = val;
            break;
        }
        /* XXX: exception ? */
        break;
    }
}
void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P)
            val = env->mcg_ctl;
        else
            val = 0;
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
#endif
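
/* lsl/lar (and verr/verw below) never fault: they report the outcome
   of the descriptor checks in ZF, setting it on success and clearing
   it on any failure. */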
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}

target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}

/* x87 FPU helpers */
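/* The register stack is modelled by env->fpstt, the top-of-stack
   index into env->fpregs[], with env->fptags[] marking empty slots:
   a push decrements fpstt modulo 8 and clears the tag of the new top
   entry. */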
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}

void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}

void helper_fpush(void)
{
    fpush();
}

void helper_fpop(void)
{
    fpop();
}

void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}

void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}

/* FPU operations */

static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32; /* single precision */
        break;
    case 2:
        rnd_type = 64; /* double precision */
        break;
    case 3:
    default:
        rnd_type = 80; /* extended precision */
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);    /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}
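
/* fprem and fprem1 perform only a partial reduction when the
   exponents differ too much, setting C2 to request another round;
   the three low quotient bits are reported in C0, C3 and C1 so the
   caller can recover the octant of the argument. */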
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}

void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel */
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRESTORE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}
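
/* Conversion between the guest's 80-bit x87 format (explicit 64-bit
   mantissa, 15-bit exponent) and the host representation: when the
   host long double is not the x87 type, values are widened from or
   narrowed to an IEEE double. */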
#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif
#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}
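
/* div64 below divides a 128-bit value by a 64-bit one with a simple
   shift-and-subtract loop: the remainder is built in a1 while the
   quotient bits are shifted into a0. It reports overflow when the
   quotient does not fit in 64 bits. */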
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}
4793 #define MMUSUFFIX _mmu
4796 #include "softmmu_template.h"
4799 #include "softmmu_template.h"
4802 #include "softmmu_template.h"
4805 #include "softmmu_template.h"
4809 #if !defined(CONFIG_USER_ONLY)
4810 /* try to fill the TLB and return an exception if error. If retaddr is
4811 NULL, it means that the function was called in C code (i.e. not
4812 from generated code or from helper.c) */
4813 /* XXX: fix it to restore all registers */
4814 void tlb_fill(target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
4816 TranslationBlock
*tb
;
4819 CPUX86State
*saved_env
;
4821 /* XXX: hack to restore env in all cases, even if not called from
4824 env
= cpu_single_env
;
4826 ret
= cpu_x86_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
4829 /* now we have a real cpu fault */
4830 pc
= (unsigned long)retaddr
;
4831 tb
= tb_find_pc(pc
);
4833 /* the PC is inside the translated code. It means that we have
4834 a virtual CPU fault */
4835 cpu_restore_state(tb
, env
, pc
, NULL
);
4838 raise_exception_err(env
->exception_index
, env
->error_code
);
/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else
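
/* The VMCB stores segment attributes in AMD's packed 12-bit format;
   the two helpers below convert between it and the CPU's descriptor
   flag layout (bits 8..23), hence the shifts by 8 and 12. */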
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
, int next_eip_addend
)
4924 helper_svm_check_intercept_param(SVM_EXIT_VMRUN
, 0);
4929 addr
= (uint32_t)EAX
;
4931 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmrun! " TARGET_FMT_lx
"\n", addr
);
4933 env
->vm_vmcb
= addr
;
4935 /* save the current CPU state in the hsave page */
4936 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
4937 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4939 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4940 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4942 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4943 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4944 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4945 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4946 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4947 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4949 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4950 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4952 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
4954 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
4956 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
4958 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
4961 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
),
4962 EIP
+ next_eip_addend
);
4963 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4964 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4966 /* load the interception bitmaps so we do not need to access the
4968 env
->intercept
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
));
4969 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
4970 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
4971 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_read
));
4972 env
->intercept_dr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_write
));
4973 env
->intercept_exceptions
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_exceptions
));
4975 /* enable intercepts */
4976 env
->hflags
|= HF_SVMI_MASK
;
4978 env
->tsc_offset
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tsc_offset
));
4980 env
->gdt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
));
4981 env
->gdt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
4983 env
->idt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
));
4984 env
->idt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
));
4986 /* clear exit_info_2 so we behave like the real hardware */
4987 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
4989 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
)));
4990 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
)));
4991 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
)));
4992 env
->cr
[2] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
4993 int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
4994 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
4995 if (int_ctl
& V_INTR_MASKING_MASK
) {
4996 env
->v_tpr
= int_ctl
& V_TPR_MASK
;
4997 env
->hflags2
|= HF2_VINTR_MASK
;
4998 if (env
->eflags
& IF_MASK
)
4999 env
->hflags2
|= HF2_HIF_MASK
;
5003 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
)));
5005 load_eflags(ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
)),
5006 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
5007 CC_OP
= CC_OP_EFLAGS
;
5009 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
5011 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
5013 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
5015 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
5018 EIP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
5020 ESP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
5021 EAX
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
5022 env
->dr
[7] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
5023 env
->dr
[6] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
5024 cpu_x86_set_cpl(env
, ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
)));
5026 /* FIXME: guest state consistency checks */
5028 switch(ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
5029 case TLB_CONTROL_DO_NOTHING
:
5031 case TLB_CONTROL_FLUSH_ALL_ASID
:
5032 /* FIXME: this is not 100% correct but should work for now */
5037 env
->hflags2
|= HF2_GIF_MASK
;
5039 if (int_ctl
& V_IRQ_MASK
) {
5040 env
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
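
    /* EVENTINJ lets the hypervisor make an event pending for the
       guest: the low bits select the vector, SVM_EVTINJ_TYPE_MASK the
       kind (external interrupt, NMI, exception or software
       interrupt), and delivery happens as if the event occurred at
       the first guest instruction. */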
    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
                /* XXX: is it always correct ? */
                do_interrupt(vector, 0, 0, 0, 1);
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = EXCP02_NMI;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
                cpu_loop_exit();
                break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}
5091 void helper_vmmcall(void)
5093 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL
, 0);
5094 raise_exception(EXCP06_ILLOP
);

void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}
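
/* Added note (not in the original): HF2_GIF_MASK models AMD's Global
   Interrupt Flag.  While it is clear (between CLGI and the next STGI)
   interrupt delivery is held off, which is how a hypervisor makes its
   world-switch sequence atomic. */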

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
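
/* Added sketch (not from the original): the MSR permission bitmap gives
   each MSR two consecutive bits (read, then write) in one of three 2K
   regions, which is where the "* 2" and the 8192/16384 bit bases above
   come from.  The helper name below is hypothetical. */
#if 0
static void msrpm_offset_sketch(uint32_t msr, int is_write,
                                uint32_t *byte_offset, uint32_t *bit_in_byte)
{
    uint32_t bitpos;
    if (msr <= 0x1fff)
        bitpos = msr * 2;                          /* region 0 at byte 0 */
    else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
        bitpos = (8192 + msr - 0xc0000000) * 2;    /* region 1 at byte 0x800 */
    else
        bitpos = (16384 + msr - 0xc0010000) * 2;   /* region 2 at byte 0x1000 */
    bitpos += is_write;             /* the write bit follows the read bit */
    *byte_offset = bitpos / 8;
    *bit_in_byte = bitpos % 8;
}
#endif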

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
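
/* Added sketch (not from the original): the I/O permission bitmap is one
   bit per port, and an access of N bytes must test N consecutive bits;
   that is what the mask built from the size field of 'param' does above.
   Treating bits 6:4 of 'param' as the access size is an assumption based
   on the code; the function name is hypothetical. */
#if 0
static int iopm_intercepted_sketch(uint64_t iopm_base, uint32_t port, int size)
{
    uint16_t mask = (1 << size) - 1;              /* one bit per byte accessed */
    uint16_t bits = lduw_phys(iopm_base + port / 8);
    return (bits >> (port & 7)) & mask;           /* any set bit intercepts */
}
#endif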

/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}
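
/* Added note (not in the original): the sequence above follows the AMD
   #VMEXIT definition step by step: write the guest state back to the
   VMCB, reload host state from vm_hsave, record exit_code/exit_info,
   clear GIF, then leave the guest via cpu_loop_exit(). */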

/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}
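
/* Added note (not in the original): fptags[] holds one byte per x87
   register, so the two 32-bit stores above set all eight tags at once;
   0x01 marks a register empty, 0x00 marks it in use. */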

void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif
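
/* Added note (not in the original): each helper_template.h inclusion
   above is parameterized by SHIFT (0 = byte, 1 = word, 2 = long,
   3 = quad) so the same template stamps out all four operand-size
   variants of the helpers used below. */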

/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_lzcnt(target_ulong t0, int wordsize)
{
    int count;
    target_ulong res, mask;

    if (wordsize > 0 && t0 == 0) {
        return wordsize;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    if (wordsize > 0) {
        return wordsize - 1 - count;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}
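
/* Added note (not in the original): helper_lzcnt doubles as BSR.  With
   wordsize == 0 it returns the index of the highest set bit (BSR); with
   wordsize == N it returns the N-bit LZCNT result, i.e. N when the input
   is 0, otherwise N - 1 - bit_index.  Two worked examples:
     helper_lzcnt(0x0010, 16) -> 11   (16-bit lzcnt: 11 leading zeros)
     helper_lzcnt(0x0010, 0)  -> 4    (bsr: highest set bit is bit 4) */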

static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
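
/* Added sketch (not from the original): the CC_OP machinery makes EFLAGS
   evaluation lazy.  An arithmetic op records its operand/result in
   CC_SRC/CC_DST and the operation kind in CC_OP; nothing is computed
   until a consumer calls the dispatchers below.  A hypothetical 8-bit
   ADD, under the conventions this file uses: */
#if 0
static void cc_lazy_flags_sketch(uint8_t src1, uint8_t src2)
{
    CC_SRC = src1;                    /* one source operand */
    CC_DST = (uint8_t)(src1 + src2);  /* the result */
    CC_OP = CC_OP_ADDB;               /* defer the flag computation */
    /* only if a later instruction actually reads the flags: */
    uint32_t eflags = helper_cc_compute_all(CC_OP);
    (void)eflags;
}
#endif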

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}