4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include "host-utils.h"
30 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
31 # define LOG_PCALL_STATE(env) \
32 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
34 # define LOG_PCALL(...) do { } while (0)
35 # define LOG_PCALL_STATE(env) do { } while (0)
40 #define raise_exception_err(a, b)\
42 qemu_log("raise_exception line=%d\n", __LINE__);\
43 (raise_exception_err)(a, b);\
47 static const uint8_t parity_table
[256] = {
48 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
49 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
50 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
51 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
52 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
53 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
54 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
55 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
56 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
57 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
58 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
59 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
60 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
61 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
62 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
63 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
64 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
65 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
66 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
67 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
68 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
69 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
70 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
71 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
72 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
73 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
74 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
75 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
76 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
77 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
78 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
79 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
/* RCL/RCR count for 16-bit operands: rotate count taken modulo 17
   (16 data bits plus the carry flag). */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};
/* RCL/RCR count for 8-bit operands: rotate count taken modulo 9
   (8 data bits plus the carry flag). */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
98 #define floatx80_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL )
99 #define floatx80_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL )
100 #define floatx80_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL )
102 static const floatx80 f15rk
[7] =
113 /* broken thread support */
115 static spinlock_t global_cpu_lock
= SPIN_LOCK_UNLOCKED
;
117 void helper_lock(void)
119 spin_lock(&global_cpu_lock
);
122 void helper_unlock(void)
124 spin_unlock(&global_cpu_lock
);
127 void helper_write_eflags(target_ulong t0
, uint32_t update_mask
)
129 load_eflags(t0
, update_mask
);
132 target_ulong
helper_read_eflags(void)
135 eflags
= helper_cc_compute_all(CC_OP
);
136 eflags
|= (DF
& DF_MASK
);
137 eflags
|= env
->eflags
& ~(VM_MASK
| RF_MASK
);
141 /* return non zero if error */
142 static inline int load_segment(uint32_t *e1_ptr
, uint32_t *e2_ptr
,
153 index
= selector
& ~7;
154 if ((index
+ 7) > dt
->limit
)
156 ptr
= dt
->base
+ index
;
157 *e1_ptr
= ldl_kernel(ptr
);
158 *e2_ptr
= ldl_kernel(ptr
+ 4);
162 static inline unsigned int get_seg_limit(uint32_t e1
, uint32_t e2
)
165 limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
166 if (e2
& DESC_G_MASK
)
167 limit
= (limit
<< 12) | 0xfff;
/* Reassemble the 32-bit segment base scattered over the descriptor:
   e1 bits 31..16 give base 15..0, e2 bits 7..0 give base 23..16 and
   e2 bits 31..24 give base 31..24. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base;

    base = e1 >> 16;
    base |= (e2 & 0xff) << 16;
    base |= e2 & 0xff000000;
    return base;
}
176 static inline void load_seg_cache_raw_dt(SegmentCache
*sc
, uint32_t e1
, uint32_t e2
)
178 sc
->base
= get_seg_base(e1
, e2
);
179 sc
->limit
= get_seg_limit(e1
, e2
);
183 /* init the segment cache in vm86 mode. */
184 static inline void load_seg_vm(int seg
, int selector
)
187 cpu_x86_load_seg_cache(env
, seg
, selector
,
188 (selector
<< 4), 0xffff, 0);
191 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr
,
192 uint32_t *esp_ptr
, int dpl
)
194 int type
, index
, shift
;
199 printf("TR: base=%p limit=%x\n", env
->tr
.base
, env
->tr
.limit
);
200 for(i
=0;i
<env
->tr
.limit
;i
++) {
201 printf("%02x ", env
->tr
.base
[i
]);
202 if ((i
& 7) == 7) printf("\n");
208 if (!(env
->tr
.flags
& DESC_P_MASK
))
209 cpu_abort(env
, "invalid tss");
210 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
212 cpu_abort(env
, "invalid tss type");
214 index
= (dpl
* 4 + 2) << shift
;
215 if (index
+ (4 << shift
) - 1 > env
->tr
.limit
)
216 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
218 *esp_ptr
= lduw_kernel(env
->tr
.base
+ index
);
219 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 2);
221 *esp_ptr
= ldl_kernel(env
->tr
.base
+ index
);
222 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 4);
226 /* XXX: merge with load_seg() */
227 static void tss_load_seg(int seg_reg
, int selector
)
232 if ((selector
& 0xfffc) != 0) {
233 if (load_segment(&e1
, &e2
, selector
) != 0)
234 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
235 if (!(e2
& DESC_S_MASK
))
236 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
238 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
239 cpl
= env
->hflags
& HF_CPL_MASK
;
240 if (seg_reg
== R_CS
) {
241 if (!(e2
& DESC_CS_MASK
))
242 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
243 /* XXX: is it correct ? */
245 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
246 if ((e2
& DESC_C_MASK
) && dpl
> rpl
)
247 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
248 } else if (seg_reg
== R_SS
) {
249 /* SS must be writable data */
250 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
251 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
252 if (dpl
!= cpl
|| dpl
!= rpl
)
253 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
255 /* not readable code */
256 if ((e2
& DESC_CS_MASK
) && !(e2
& DESC_R_MASK
))
257 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
258 /* if data or non conforming code, checks the rights */
259 if (((e2
>> DESC_TYPE_SHIFT
) & 0xf) < 12) {
260 if (dpl
< cpl
|| dpl
< rpl
)
261 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
264 if (!(e2
& DESC_P_MASK
))
265 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
266 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
267 get_seg_base(e1
, e2
),
268 get_seg_limit(e1
, e2
),
271 if (seg_reg
== R_SS
|| seg_reg
== R_CS
)
272 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
276 #define SWITCH_TSS_JMP 0
277 #define SWITCH_TSS_IRET 1
278 #define SWITCH_TSS_CALL 2
280 /* XXX: restore CPU state in registers (PowerPC case) */
281 static void switch_tss(int tss_selector
,
282 uint32_t e1
, uint32_t e2
, int source
,
285 int tss_limit
, tss_limit_max
, type
, old_tss_limit_max
, old_type
, v1
, v2
, i
;
286 target_ulong tss_base
;
287 uint32_t new_regs
[8], new_segs
[6];
288 uint32_t new_eflags
, new_eip
, new_cr3
, new_ldt
, new_trap
;
289 uint32_t old_eflags
, eflags_mask
;
294 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
295 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector
, type
, source
);
297 /* if task gate, we read the TSS segment and we load it */
299 if (!(e2
& DESC_P_MASK
))
300 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
301 tss_selector
= e1
>> 16;
302 if (tss_selector
& 4)
303 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
304 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
305 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
306 if (e2
& DESC_S_MASK
)
307 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
308 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
310 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
313 if (!(e2
& DESC_P_MASK
))
314 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
320 tss_limit
= get_seg_limit(e1
, e2
);
321 tss_base
= get_seg_base(e1
, e2
);
322 if ((tss_selector
& 4) != 0 ||
323 tss_limit
< tss_limit_max
)
324 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
325 old_type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
327 old_tss_limit_max
= 103;
329 old_tss_limit_max
= 43;
331 /* read all the registers from the new TSS */
334 new_cr3
= ldl_kernel(tss_base
+ 0x1c);
335 new_eip
= ldl_kernel(tss_base
+ 0x20);
336 new_eflags
= ldl_kernel(tss_base
+ 0x24);
337 for(i
= 0; i
< 8; i
++)
338 new_regs
[i
] = ldl_kernel(tss_base
+ (0x28 + i
* 4));
339 for(i
= 0; i
< 6; i
++)
340 new_segs
[i
] = lduw_kernel(tss_base
+ (0x48 + i
* 4));
341 new_ldt
= lduw_kernel(tss_base
+ 0x60);
342 new_trap
= ldl_kernel(tss_base
+ 0x64);
346 new_eip
= lduw_kernel(tss_base
+ 0x0e);
347 new_eflags
= lduw_kernel(tss_base
+ 0x10);
348 for(i
= 0; i
< 8; i
++)
349 new_regs
[i
] = lduw_kernel(tss_base
+ (0x12 + i
* 2)) | 0xffff0000;
350 for(i
= 0; i
< 4; i
++)
351 new_segs
[i
] = lduw_kernel(tss_base
+ (0x22 + i
* 4));
352 new_ldt
= lduw_kernel(tss_base
+ 0x2a);
357 /* XXX: avoid a compiler warning, see
358 http://support.amd.com/us/Processor_TechDocs/24593.pdf
359 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
362 /* NOTE: we must avoid memory exceptions during the task switch,
363 so we make dummy accesses before */
364 /* XXX: it can still fail in some cases, so a bigger hack is
365 necessary to valid the TLB after having done the accesses */
367 v1
= ldub_kernel(env
->tr
.base
);
368 v2
= ldub_kernel(env
->tr
.base
+ old_tss_limit_max
);
369 stb_kernel(env
->tr
.base
, v1
);
370 stb_kernel(env
->tr
.base
+ old_tss_limit_max
, v2
);
372 /* clear busy bit (it is restartable) */
373 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_IRET
) {
376 ptr
= env
->gdt
.base
+ (env
->tr
.selector
& ~7);
377 e2
= ldl_kernel(ptr
+ 4);
378 e2
&= ~DESC_TSS_BUSY_MASK
;
379 stl_kernel(ptr
+ 4, e2
);
381 old_eflags
= compute_eflags();
382 if (source
== SWITCH_TSS_IRET
)
383 old_eflags
&= ~NT_MASK
;
385 /* save the current state in the old TSS */
388 stl_kernel(env
->tr
.base
+ 0x20, next_eip
);
389 stl_kernel(env
->tr
.base
+ 0x24, old_eflags
);
390 stl_kernel(env
->tr
.base
+ (0x28 + 0 * 4), EAX
);
391 stl_kernel(env
->tr
.base
+ (0x28 + 1 * 4), ECX
);
392 stl_kernel(env
->tr
.base
+ (0x28 + 2 * 4), EDX
);
393 stl_kernel(env
->tr
.base
+ (0x28 + 3 * 4), EBX
);
394 stl_kernel(env
->tr
.base
+ (0x28 + 4 * 4), ESP
);
395 stl_kernel(env
->tr
.base
+ (0x28 + 5 * 4), EBP
);
396 stl_kernel(env
->tr
.base
+ (0x28 + 6 * 4), ESI
);
397 stl_kernel(env
->tr
.base
+ (0x28 + 7 * 4), EDI
);
398 for(i
= 0; i
< 6; i
++)
399 stw_kernel(env
->tr
.base
+ (0x48 + i
* 4), env
->segs
[i
].selector
);
402 stw_kernel(env
->tr
.base
+ 0x0e, next_eip
);
403 stw_kernel(env
->tr
.base
+ 0x10, old_eflags
);
404 stw_kernel(env
->tr
.base
+ (0x12 + 0 * 2), EAX
);
405 stw_kernel(env
->tr
.base
+ (0x12 + 1 * 2), ECX
);
406 stw_kernel(env
->tr
.base
+ (0x12 + 2 * 2), EDX
);
407 stw_kernel(env
->tr
.base
+ (0x12 + 3 * 2), EBX
);
408 stw_kernel(env
->tr
.base
+ (0x12 + 4 * 2), ESP
);
409 stw_kernel(env
->tr
.base
+ (0x12 + 5 * 2), EBP
);
410 stw_kernel(env
->tr
.base
+ (0x12 + 6 * 2), ESI
);
411 stw_kernel(env
->tr
.base
+ (0x12 + 7 * 2), EDI
);
412 for(i
= 0; i
< 4; i
++)
413 stw_kernel(env
->tr
.base
+ (0x22 + i
* 4), env
->segs
[i
].selector
);
416 /* now if an exception occurs, it will occurs in the next task
419 if (source
== SWITCH_TSS_CALL
) {
420 stw_kernel(tss_base
, env
->tr
.selector
);
421 new_eflags
|= NT_MASK
;
425 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_CALL
) {
428 ptr
= env
->gdt
.base
+ (tss_selector
& ~7);
429 e2
= ldl_kernel(ptr
+ 4);
430 e2
|= DESC_TSS_BUSY_MASK
;
431 stl_kernel(ptr
+ 4, e2
);
434 /* set the new CPU state */
435 /* from this point, any exception which occurs can give problems */
436 env
->cr
[0] |= CR0_TS_MASK
;
437 env
->hflags
|= HF_TS_MASK
;
438 env
->tr
.selector
= tss_selector
;
439 env
->tr
.base
= tss_base
;
440 env
->tr
.limit
= tss_limit
;
441 env
->tr
.flags
= e2
& ~DESC_TSS_BUSY_MASK
;
443 if ((type
& 8) && (env
->cr
[0] & CR0_PG_MASK
)) {
444 cpu_x86_update_cr3(env
, new_cr3
);
447 /* load all registers without an exception, then reload them with
448 possible exception */
450 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
|
451 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
;
453 eflags_mask
&= 0xffff;
454 load_eflags(new_eflags
, eflags_mask
);
455 /* XXX: what to do in 16 bit case ? */
464 if (new_eflags
& VM_MASK
) {
465 for(i
= 0; i
< 6; i
++)
466 load_seg_vm(i
, new_segs
[i
]);
467 /* in vm86, CPL is always 3 */
468 cpu_x86_set_cpl(env
, 3);
470 /* CPL is set the RPL of CS */
471 cpu_x86_set_cpl(env
, new_segs
[R_CS
] & 3);
472 /* first just selectors as the rest may trigger exceptions */
473 for(i
= 0; i
< 6; i
++)
474 cpu_x86_load_seg_cache(env
, i
, new_segs
[i
], 0, 0, 0);
477 env
->ldt
.selector
= new_ldt
& ~4;
484 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
486 if ((new_ldt
& 0xfffc) != 0) {
488 index
= new_ldt
& ~7;
489 if ((index
+ 7) > dt
->limit
)
490 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
491 ptr
= dt
->base
+ index
;
492 e1
= ldl_kernel(ptr
);
493 e2
= ldl_kernel(ptr
+ 4);
494 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
495 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
496 if (!(e2
& DESC_P_MASK
))
497 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
498 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
501 /* load the segments */
502 if (!(new_eflags
& VM_MASK
)) {
503 tss_load_seg(R_CS
, new_segs
[R_CS
]);
504 tss_load_seg(R_SS
, new_segs
[R_SS
]);
505 tss_load_seg(R_ES
, new_segs
[R_ES
]);
506 tss_load_seg(R_DS
, new_segs
[R_DS
]);
507 tss_load_seg(R_FS
, new_segs
[R_FS
]);
508 tss_load_seg(R_GS
, new_segs
[R_GS
]);
511 /* check that EIP is in the CS segment limits */
512 if (new_eip
> env
->segs
[R_CS
].limit
) {
513 /* XXX: different exception if CALL ? */
514 raise_exception_err(EXCP0D_GPF
, 0);
517 #ifndef CONFIG_USER_ONLY
518 /* reset local breakpoints */
519 if (env
->dr
[7] & 0x55) {
520 for (i
= 0; i
< 4; i
++) {
521 if (hw_breakpoint_enabled(env
->dr
[7], i
) == 0x1)
522 hw_breakpoint_remove(env
, i
);
529 /* check if Port I/O is allowed in TSS */
530 static inline void check_io(int addr
, int size
)
532 int io_offset
, val
, mask
;
534 /* TSS must be a valid 32 bit one */
535 if (!(env
->tr
.flags
& DESC_P_MASK
) ||
536 ((env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf) != 9 ||
539 io_offset
= lduw_kernel(env
->tr
.base
+ 0x66);
540 io_offset
+= (addr
>> 3);
541 /* Note: the check needs two bytes */
542 if ((io_offset
+ 1) > env
->tr
.limit
)
544 val
= lduw_kernel(env
->tr
.base
+ io_offset
);
546 mask
= (1 << size
) - 1;
547 /* all bits must be zero to allow the I/O */
548 if ((val
& mask
) != 0) {
550 raise_exception_err(EXCP0D_GPF
, 0);
/* I/O permission check for a byte-sized port access. */
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}
/* I/O permission check for a word-sized port access. */
void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}
/* I/O permission check for a dword-sized port access. */
void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
/* Write the low byte of data to an I/O port. */
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}
574 target_ulong
helper_inb(uint32_t port
)
576 return cpu_inb(port
);
/* Write the low 16 bits of data to an I/O port. */
void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}
584 target_ulong
helper_inw(uint32_t port
)
586 return cpu_inw(port
);
/* Write a 32-bit value to an I/O port. */
void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}
594 target_ulong
helper_inl(uint32_t port
)
596 return cpu_inl(port
);
599 static inline unsigned int get_sp_mask(unsigned int e2
)
601 if (e2
& DESC_B_MASK
)
/* Return 1 if the exception vector pushes an error code on the stack
   (#DF, #TS, #NP, #SS, #GP, #PF, #AC), else 0.  The misspelled name is
   kept as-is because callers elsewhere in this file use it. */
static int exeption_has_error_code(int intno)
{
    switch (intno) {
    case 8:  /* #DF */
    case 10: /* #TS */
    case 11: /* #NP */
    case 12: /* #SS */
    case 13: /* #GP */
    case 14: /* #PF */
    case 17: /* #AC */
        return 1;
    default:
        return 0;
    }
}
623 #define SET_ESP(val, sp_mask)\
625 if ((sp_mask) == 0xffff)\
626 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
627 else if ((sp_mask) == 0xffffffffLL)\
628 ESP = (uint32_t)(val);\
633 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
636 /* in 64-bit machines, this can overflow. So this segment addition macro
637 * can be used to trim the value to 32-bit whenever needed */
638 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
640 /* XXX: add a is_user flag to have proper security support */
641 #define PUSHW(ssp, sp, sp_mask, val)\
644 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
647 #define PUSHL(ssp, sp, sp_mask, val)\
650 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
653 #define POPW(ssp, sp, sp_mask, val)\
655 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
659 #define POPL(ssp, sp, sp_mask, val)\
661 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
665 /* protected mode interrupt */
666 static void do_interrupt_protected(int intno
, int is_int
, int error_code
,
667 unsigned int next_eip
, int is_hw
)
670 target_ulong ptr
, ssp
;
671 int type
, dpl
, selector
, ss_dpl
, cpl
;
672 int has_error_code
, new_stack
, shift
;
673 uint32_t e1
, e2
, offset
, ss
= 0, esp
, ss_e1
= 0, ss_e2
= 0;
674 uint32_t old_eip
, sp_mask
;
677 if (!is_int
&& !is_hw
)
678 has_error_code
= exeption_has_error_code(intno
);
685 if (intno
* 8 + 7 > dt
->limit
)
686 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
687 ptr
= dt
->base
+ intno
* 8;
688 e1
= ldl_kernel(ptr
);
689 e2
= ldl_kernel(ptr
+ 4);
690 /* check gate type */
691 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
693 case 5: /* task gate */
694 /* must do that check here to return the correct error code */
695 if (!(e2
& DESC_P_MASK
))
696 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
697 switch_tss(intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
698 if (has_error_code
) {
701 /* push the error code */
702 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
704 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
)
708 esp
= (ESP
- (2 << shift
)) & mask
;
709 ssp
= env
->segs
[R_SS
].base
+ esp
;
711 stl_kernel(ssp
, error_code
);
713 stw_kernel(ssp
, error_code
);
717 case 6: /* 286 interrupt gate */
718 case 7: /* 286 trap gate */
719 case 14: /* 386 interrupt gate */
720 case 15: /* 386 trap gate */
723 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
726 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
727 cpl
= env
->hflags
& HF_CPL_MASK
;
728 /* check privilege if software int */
729 if (is_int
&& dpl
< cpl
)
730 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
731 /* check valid bit */
732 if (!(e2
& DESC_P_MASK
))
733 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
735 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
736 if ((selector
& 0xfffc) == 0)
737 raise_exception_err(EXCP0D_GPF
, 0);
739 if (load_segment(&e1
, &e2
, selector
) != 0)
740 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
741 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
742 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
743 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
745 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
746 if (!(e2
& DESC_P_MASK
))
747 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
748 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
749 /* to inner privilege */
750 get_ss_esp_from_tss(&ss
, &esp
, dpl
);
751 if ((ss
& 0xfffc) == 0)
752 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
754 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
755 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
756 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
757 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
759 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
760 if (!(ss_e2
& DESC_S_MASK
) ||
761 (ss_e2
& DESC_CS_MASK
) ||
762 !(ss_e2
& DESC_W_MASK
))
763 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
764 if (!(ss_e2
& DESC_P_MASK
))
765 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
767 sp_mask
= get_sp_mask(ss_e2
);
768 ssp
= get_seg_base(ss_e1
, ss_e2
);
769 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
770 /* to same privilege */
771 if (env
->eflags
& VM_MASK
)
772 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
774 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
775 ssp
= env
->segs
[R_SS
].base
;
779 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
780 new_stack
= 0; /* avoid warning */
781 sp_mask
= 0; /* avoid warning */
782 ssp
= 0; /* avoid warning */
783 esp
= 0; /* avoid warning */
789 /* XXX: check that enough room is available */
790 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
791 if (env
->eflags
& VM_MASK
)
797 if (env
->eflags
& VM_MASK
) {
798 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
799 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
800 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
801 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
803 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
804 PUSHL(ssp
, esp
, sp_mask
, ESP
);
806 PUSHL(ssp
, esp
, sp_mask
, compute_eflags());
807 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
808 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
809 if (has_error_code
) {
810 PUSHL(ssp
, esp
, sp_mask
, error_code
);
814 if (env
->eflags
& VM_MASK
) {
815 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
816 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
817 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
818 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
820 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
821 PUSHW(ssp
, esp
, sp_mask
, ESP
);
823 PUSHW(ssp
, esp
, sp_mask
, compute_eflags());
824 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
825 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
826 if (has_error_code
) {
827 PUSHW(ssp
, esp
, sp_mask
, error_code
);
832 if (env
->eflags
& VM_MASK
) {
833 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
834 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
835 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
836 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
838 ss
= (ss
& ~3) | dpl
;
839 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
840 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
842 SET_ESP(esp
, sp_mask
);
844 selector
= (selector
& ~3) | dpl
;
845 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
846 get_seg_base(e1
, e2
),
847 get_seg_limit(e1
, e2
),
849 cpu_x86_set_cpl(env
, dpl
);
852 /* interrupt gate clear IF mask */
853 if ((type
& 1) == 0) {
854 env
->eflags
&= ~IF_MASK
;
856 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
861 #define PUSHQ(sp, val)\
864 stq_kernel(sp, (val));\
867 #define POPQ(sp, val)\
869 val = ldq_kernel(sp);\
873 static inline target_ulong
get_rsp_from_tss(int level
)
878 printf("TR: base=" TARGET_FMT_lx
" limit=%x\n",
879 env
->tr
.base
, env
->tr
.limit
);
882 if (!(env
->tr
.flags
& DESC_P_MASK
))
883 cpu_abort(env
, "invalid tss");
884 index
= 8 * level
+ 4;
885 if ((index
+ 7) > env
->tr
.limit
)
886 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
887 return ldq_kernel(env
->tr
.base
+ index
);
890 /* 64 bit interrupt */
891 static void do_interrupt64(int intno
, int is_int
, int error_code
,
892 target_ulong next_eip
, int is_hw
)
896 int type
, dpl
, selector
, cpl
, ist
;
897 int has_error_code
, new_stack
;
898 uint32_t e1
, e2
, e3
, ss
;
899 target_ulong old_eip
, esp
, offset
;
902 if (!is_int
&& !is_hw
)
903 has_error_code
= exeption_has_error_code(intno
);
910 if (intno
* 16 + 15 > dt
->limit
)
911 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
912 ptr
= dt
->base
+ intno
* 16;
913 e1
= ldl_kernel(ptr
);
914 e2
= ldl_kernel(ptr
+ 4);
915 e3
= ldl_kernel(ptr
+ 8);
916 /* check gate type */
917 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
919 case 14: /* 386 interrupt gate */
920 case 15: /* 386 trap gate */
923 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
926 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
927 cpl
= env
->hflags
& HF_CPL_MASK
;
928 /* check privilege if software int */
929 if (is_int
&& dpl
< cpl
)
930 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
931 /* check valid bit */
932 if (!(e2
& DESC_P_MASK
))
933 raise_exception_err(EXCP0B_NOSEG
, intno
* 16 + 2);
935 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
937 if ((selector
& 0xfffc) == 0)
938 raise_exception_err(EXCP0D_GPF
, 0);
940 if (load_segment(&e1
, &e2
, selector
) != 0)
941 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
942 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
943 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
944 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
946 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
947 if (!(e2
& DESC_P_MASK
))
948 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
949 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
))
950 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
951 if ((!(e2
& DESC_C_MASK
) && dpl
< cpl
) || ist
!= 0) {
952 /* to inner privilege */
954 esp
= get_rsp_from_tss(ist
+ 3);
956 esp
= get_rsp_from_tss(dpl
);
957 esp
&= ~0xfLL
; /* align stack */
960 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
961 /* to same privilege */
962 if (env
->eflags
& VM_MASK
)
963 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
966 esp
= get_rsp_from_tss(ist
+ 3);
969 esp
&= ~0xfLL
; /* align stack */
972 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
973 new_stack
= 0; /* avoid warning */
974 esp
= 0; /* avoid warning */
977 PUSHQ(esp
, env
->segs
[R_SS
].selector
);
979 PUSHQ(esp
, compute_eflags());
980 PUSHQ(esp
, env
->segs
[R_CS
].selector
);
982 if (has_error_code
) {
983 PUSHQ(esp
, error_code
);
988 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
992 selector
= (selector
& ~3) | dpl
;
993 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
994 get_seg_base(e1
, e2
),
995 get_seg_limit(e1
, e2
),
997 cpu_x86_set_cpl(env
, dpl
);
1000 /* interrupt gate clear IF mask */
1001 if ((type
& 1) == 0) {
1002 env
->eflags
&= ~IF_MASK
;
1004 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
1008 #ifdef TARGET_X86_64
1009 #if defined(CONFIG_USER_ONLY)
1010 void helper_syscall(int next_eip_addend
)
1012 env
->exception_index
= EXCP_SYSCALL
;
1013 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
1017 void helper_syscall(int next_eip_addend
)
1021 if (!(env
->efer
& MSR_EFER_SCE
)) {
1022 raise_exception_err(EXCP06_ILLOP
, 0);
1024 selector
= (env
->star
>> 32) & 0xffff;
1025 if (env
->hflags
& HF_LMA_MASK
) {
1028 ECX
= env
->eip
+ next_eip_addend
;
1029 env
->regs
[11] = compute_eflags();
1031 code64
= env
->hflags
& HF_CS64_MASK
;
1033 cpu_x86_set_cpl(env
, 0);
1034 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1036 DESC_G_MASK
| DESC_P_MASK
|
1038 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
1039 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1041 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1043 DESC_W_MASK
| DESC_A_MASK
);
1044 env
->eflags
&= ~env
->fmask
;
1045 load_eflags(env
->eflags
, 0);
1047 env
->eip
= env
->lstar
;
1049 env
->eip
= env
->cstar
;
1051 ECX
= (uint32_t)(env
->eip
+ next_eip_addend
);
1053 cpu_x86_set_cpl(env
, 0);
1054 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1056 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1058 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1059 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1061 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1063 DESC_W_MASK
| DESC_A_MASK
);
1064 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
1065 env
->eip
= (uint32_t)env
->star
;
1071 #ifdef TARGET_X86_64
1072 void helper_sysret(int dflag
)
1076 if (!(env
->efer
& MSR_EFER_SCE
)) {
1077 raise_exception_err(EXCP06_ILLOP
, 0);
1079 cpl
= env
->hflags
& HF_CPL_MASK
;
1080 if (!(env
->cr
[0] & CR0_PE_MASK
) || cpl
!= 0) {
1081 raise_exception_err(EXCP0D_GPF
, 0);
1083 selector
= (env
->star
>> 48) & 0xffff;
1084 if (env
->hflags
& HF_LMA_MASK
) {
1086 cpu_x86_load_seg_cache(env
, R_CS
, (selector
+ 16) | 3,
1088 DESC_G_MASK
| DESC_P_MASK
|
1089 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1090 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1094 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1096 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1097 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1098 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1099 env
->eip
= (uint32_t)ECX
;
1101 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1103 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1104 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1105 DESC_W_MASK
| DESC_A_MASK
);
1106 load_eflags((uint32_t)(env
->regs
[11]), TF_MASK
| AC_MASK
| ID_MASK
|
1107 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
1108 cpu_x86_set_cpl(env
, 3);
1110 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1112 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1113 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1114 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1115 env
->eip
= (uint32_t)ECX
;
1116 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1118 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1119 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1120 DESC_W_MASK
| DESC_A_MASK
);
1121 env
->eflags
|= IF_MASK
;
1122 cpu_x86_set_cpl(env
, 3);
1127 /* real mode interrupt */
1128 static void do_interrupt_real(int intno
, int is_int
, int error_code
,
1129 unsigned int next_eip
)
1132 target_ulong ptr
, ssp
;
1134 uint32_t offset
, esp
;
1135 uint32_t old_cs
, old_eip
;
1137 /* real mode (simpler !) */
1139 if (intno
* 4 + 3 > dt
->limit
)
1140 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
1141 ptr
= dt
->base
+ intno
* 4;
1142 offset
= lduw_kernel(ptr
);
1143 selector
= lduw_kernel(ptr
+ 2);
1145 ssp
= env
->segs
[R_SS
].base
;
1150 old_cs
= env
->segs
[R_CS
].selector
;
1151 /* XXX: use SS segment size ? */
1152 PUSHW(ssp
, esp
, 0xffff, compute_eflags());
1153 PUSHW(ssp
, esp
, 0xffff, old_cs
);
1154 PUSHW(ssp
, esp
, 0xffff, old_eip
);
1156 /* update processor state */
1157 ESP
= (ESP
& ~0xffff) | (esp
& 0xffff);
1159 env
->segs
[R_CS
].selector
= selector
;
1160 env
->segs
[R_CS
].base
= (selector
<< 4);
1161 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
1164 /* fake user mode interrupt */
1165 void do_interrupt_user(int intno
, int is_int
, int error_code
,
1166 target_ulong next_eip
)
1170 int dpl
, cpl
, shift
;
1174 if (env
->hflags
& HF_LMA_MASK
) {
1179 ptr
= dt
->base
+ (intno
<< shift
);
1180 e2
= ldl_kernel(ptr
+ 4);
1182 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1183 cpl
= env
->hflags
& HF_CPL_MASK
;
1184 /* check privilege if software int */
1185 if (is_int
&& dpl
< cpl
)
1186 raise_exception_err(EXCP0D_GPF
, (intno
<< shift
) + 2);
1188 /* Since we emulate only user space, we cannot do more than
1189 exiting the emulation with the suitable exception and error
1195 #if !defined(CONFIG_USER_ONLY)
1196 static void handle_even_inj(int intno
, int is_int
, int error_code
,
1199 uint32_t event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
1200 if (!(event_inj
& SVM_EVTINJ_VALID
)) {
1203 type
= SVM_EVTINJ_TYPE_SOFT
;
1205 type
= SVM_EVTINJ_TYPE_EXEPT
;
1206 event_inj
= intno
| type
| SVM_EVTINJ_VALID
;
1207 if (!rm
&& exeption_has_error_code(intno
)) {
1208 event_inj
|= SVM_EVTINJ_VALID_ERR
;
1209 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
), error_code
);
1211 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), event_inj
);
1217 * Begin execution of an interruption. is_int is TRUE if coming from
1218 * the int instruction. next_eip is the EIP value AFTER the interrupt
1219 * instruction. It is only relevant if is_int is TRUE.
1221 void do_interrupt(int intno
, int is_int
, int error_code
,
1222 target_ulong next_eip
, int is_hw
)
1224 if (qemu_loglevel_mask(CPU_LOG_INT
)) {
1225 if ((env
->cr
[0] & CR0_PE_MASK
)) {
1227 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
" pc=" TARGET_FMT_lx
" SP=%04x:" TARGET_FMT_lx
,
1228 count
, intno
, error_code
, is_int
,
1229 env
->hflags
& HF_CPL_MASK
,
1230 env
->segs
[R_CS
].selector
, EIP
,
1231 (int)env
->segs
[R_CS
].base
+ EIP
,
1232 env
->segs
[R_SS
].selector
, ESP
);
1233 if (intno
== 0x0e) {
1234 qemu_log(" CR2=" TARGET_FMT_lx
, env
->cr
[2]);
1236 qemu_log(" EAX=" TARGET_FMT_lx
, EAX
);
1239 log_cpu_state(env
, X86_DUMP_CCOP
);
1245 ptr
= env
->segs
[R_CS
].base
+ env
->eip
;
1246 for(i
= 0; i
< 16; i
++) {
1247 qemu_log(" %02x", ldub(ptr
+ i
));
1255 if (env
->cr
[0] & CR0_PE_MASK
) {
1256 #if !defined(CONFIG_USER_ONLY)
1257 if (env
->hflags
& HF_SVMI_MASK
)
1258 handle_even_inj(intno
, is_int
, error_code
, is_hw
, 0);
1260 #ifdef TARGET_X86_64
1261 if (env
->hflags
& HF_LMA_MASK
) {
1262 do_interrupt64(intno
, is_int
, error_code
, next_eip
, is_hw
);
1266 do_interrupt_protected(intno
, is_int
, error_code
, next_eip
, is_hw
);
1269 #if !defined(CONFIG_USER_ONLY)
1270 if (env
->hflags
& HF_SVMI_MASK
)
1271 handle_even_inj(intno
, is_int
, error_code
, is_hw
, 1);
1273 do_interrupt_real(intno
, is_int
, error_code
, next_eip
);
1276 #if !defined(CONFIG_USER_ONLY)
1277 if (env
->hflags
& HF_SVMI_MASK
) {
1278 uint32_t event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
1279 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), event_inj
& ~SVM_EVTINJ_VALID
);
1284 /* This should come from sysemu.h - if we could include it here... */
1285 void qemu_system_reset_request(void);
1288 * Check nested exceptions and change to double or triple fault if
1289 * needed. It should only be called, if this is not an interrupt.
1290 * Returns the new exception number.
1292 static int check_exception(int intno
, int *error_code
)
1294 int first_contributory
= env
->old_exception
== 0 ||
1295 (env
->old_exception
>= 10 &&
1296 env
->old_exception
<= 13);
1297 int second_contributory
= intno
== 0 ||
1298 (intno
>= 10 && intno
<= 13);
1300 qemu_log_mask(CPU_LOG_INT
, "check_exception old: 0x%x new 0x%x\n",
1301 env
->old_exception
, intno
);
1303 #if !defined(CONFIG_USER_ONLY)
1304 if (env
->old_exception
== EXCP08_DBLE
) {
1305 if (env
->hflags
& HF_SVMI_MASK
)
1306 helper_vmexit(SVM_EXIT_SHUTDOWN
, 0); /* does not return */
1308 qemu_log_mask(CPU_LOG_RESET
, "Triple fault\n");
1310 qemu_system_reset_request();
1315 if ((first_contributory
&& second_contributory
)
1316 || (env
->old_exception
== EXCP0E_PAGE
&&
1317 (second_contributory
|| (intno
== EXCP0E_PAGE
)))) {
1318 intno
= EXCP08_DBLE
;
1322 if (second_contributory
|| (intno
== EXCP0E_PAGE
) ||
1323 (intno
== EXCP08_DBLE
))
1324 env
->old_exception
= intno
;
1330 * Signal an interruption. It is executed in the main CPU loop.
1331 * is_int is TRUE if coming from the int instruction. next_eip is the
1332 * EIP value AFTER the interrupt instruction. It is only relevant if
1335 static void QEMU_NORETURN
raise_interrupt(int intno
, int is_int
, int error_code
,
1336 int next_eip_addend
)
1339 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE
+ intno
, error_code
);
1340 intno
= check_exception(intno
, &error_code
);
1342 helper_svm_check_intercept_param(SVM_EXIT_SWINT
, 0);
1345 env
->exception_index
= intno
;
1346 env
->error_code
= error_code
;
1347 env
->exception_is_int
= is_int
;
1348 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
/* shortcuts to generate exceptions */

/* Raise an exception that carries an error code (e.g. #GP, #TS). */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
/* Raise an exception with no error code. */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
1364 void raise_exception_env(int exception_index
, CPUState
*nenv
)
1367 raise_exception(exception_index
);
1371 #if defined(CONFIG_USER_ONLY)
1373 void do_smm_enter(void)
1377 void helper_rsm(void)
1383 #ifdef TARGET_X86_64
1384 #define SMM_REVISION_ID 0x00020064
1386 #define SMM_REVISION_ID 0x00020000
1389 void do_smm_enter(void)
1391 target_ulong sm_state
;
1395 qemu_log_mask(CPU_LOG_INT
, "SMM: enter\n");
1396 log_cpu_state_mask(CPU_LOG_INT
, env
, X86_DUMP_CCOP
);
1398 env
->hflags
|= HF_SMM_MASK
;
1399 cpu_smm_update(env
);
1401 sm_state
= env
->smbase
+ 0x8000;
1403 #ifdef TARGET_X86_64
1404 for(i
= 0; i
< 6; i
++) {
1406 offset
= 0x7e00 + i
* 16;
1407 stw_phys(sm_state
+ offset
, dt
->selector
);
1408 stw_phys(sm_state
+ offset
+ 2, (dt
->flags
>> 8) & 0xf0ff);
1409 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1410 stq_phys(sm_state
+ offset
+ 8, dt
->base
);
1413 stq_phys(sm_state
+ 0x7e68, env
->gdt
.base
);
1414 stl_phys(sm_state
+ 0x7e64, env
->gdt
.limit
);
1416 stw_phys(sm_state
+ 0x7e70, env
->ldt
.selector
);
1417 stq_phys(sm_state
+ 0x7e78, env
->ldt
.base
);
1418 stl_phys(sm_state
+ 0x7e74, env
->ldt
.limit
);
1419 stw_phys(sm_state
+ 0x7e72, (env
->ldt
.flags
>> 8) & 0xf0ff);
1421 stq_phys(sm_state
+ 0x7e88, env
->idt
.base
);
1422 stl_phys(sm_state
+ 0x7e84, env
->idt
.limit
);
1424 stw_phys(sm_state
+ 0x7e90, env
->tr
.selector
);
1425 stq_phys(sm_state
+ 0x7e98, env
->tr
.base
);
1426 stl_phys(sm_state
+ 0x7e94, env
->tr
.limit
);
1427 stw_phys(sm_state
+ 0x7e92, (env
->tr
.flags
>> 8) & 0xf0ff);
1429 stq_phys(sm_state
+ 0x7ed0, env
->efer
);
1431 stq_phys(sm_state
+ 0x7ff8, EAX
);
1432 stq_phys(sm_state
+ 0x7ff0, ECX
);
1433 stq_phys(sm_state
+ 0x7fe8, EDX
);
1434 stq_phys(sm_state
+ 0x7fe0, EBX
);
1435 stq_phys(sm_state
+ 0x7fd8, ESP
);
1436 stq_phys(sm_state
+ 0x7fd0, EBP
);
1437 stq_phys(sm_state
+ 0x7fc8, ESI
);
1438 stq_phys(sm_state
+ 0x7fc0, EDI
);
1439 for(i
= 8; i
< 16; i
++)
1440 stq_phys(sm_state
+ 0x7ff8 - i
* 8, env
->regs
[i
]);
1441 stq_phys(sm_state
+ 0x7f78, env
->eip
);
1442 stl_phys(sm_state
+ 0x7f70, compute_eflags());
1443 stl_phys(sm_state
+ 0x7f68, env
->dr
[6]);
1444 stl_phys(sm_state
+ 0x7f60, env
->dr
[7]);
1446 stl_phys(sm_state
+ 0x7f48, env
->cr
[4]);
1447 stl_phys(sm_state
+ 0x7f50, env
->cr
[3]);
1448 stl_phys(sm_state
+ 0x7f58, env
->cr
[0]);
1450 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1451 stl_phys(sm_state
+ 0x7f00, env
->smbase
);
1453 stl_phys(sm_state
+ 0x7ffc, env
->cr
[0]);
1454 stl_phys(sm_state
+ 0x7ff8, env
->cr
[3]);
1455 stl_phys(sm_state
+ 0x7ff4, compute_eflags());
1456 stl_phys(sm_state
+ 0x7ff0, env
->eip
);
1457 stl_phys(sm_state
+ 0x7fec, EDI
);
1458 stl_phys(sm_state
+ 0x7fe8, ESI
);
1459 stl_phys(sm_state
+ 0x7fe4, EBP
);
1460 stl_phys(sm_state
+ 0x7fe0, ESP
);
1461 stl_phys(sm_state
+ 0x7fdc, EBX
);
1462 stl_phys(sm_state
+ 0x7fd8, EDX
);
1463 stl_phys(sm_state
+ 0x7fd4, ECX
);
1464 stl_phys(sm_state
+ 0x7fd0, EAX
);
1465 stl_phys(sm_state
+ 0x7fcc, env
->dr
[6]);
1466 stl_phys(sm_state
+ 0x7fc8, env
->dr
[7]);
1468 stl_phys(sm_state
+ 0x7fc4, env
->tr
.selector
);
1469 stl_phys(sm_state
+ 0x7f64, env
->tr
.base
);
1470 stl_phys(sm_state
+ 0x7f60, env
->tr
.limit
);
1471 stl_phys(sm_state
+ 0x7f5c, (env
->tr
.flags
>> 8) & 0xf0ff);
1473 stl_phys(sm_state
+ 0x7fc0, env
->ldt
.selector
);
1474 stl_phys(sm_state
+ 0x7f80, env
->ldt
.base
);
1475 stl_phys(sm_state
+ 0x7f7c, env
->ldt
.limit
);
1476 stl_phys(sm_state
+ 0x7f78, (env
->ldt
.flags
>> 8) & 0xf0ff);
1478 stl_phys(sm_state
+ 0x7f74, env
->gdt
.base
);
1479 stl_phys(sm_state
+ 0x7f70, env
->gdt
.limit
);
1481 stl_phys(sm_state
+ 0x7f58, env
->idt
.base
);
1482 stl_phys(sm_state
+ 0x7f54, env
->idt
.limit
);
1484 for(i
= 0; i
< 6; i
++) {
1487 offset
= 0x7f84 + i
* 12;
1489 offset
= 0x7f2c + (i
- 3) * 12;
1490 stl_phys(sm_state
+ 0x7fa8 + i
* 4, dt
->selector
);
1491 stl_phys(sm_state
+ offset
+ 8, dt
->base
);
1492 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1493 stl_phys(sm_state
+ offset
, (dt
->flags
>> 8) & 0xf0ff);
1495 stl_phys(sm_state
+ 0x7f14, env
->cr
[4]);
1497 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1498 stl_phys(sm_state
+ 0x7ef8, env
->smbase
);
1500 /* init SMM cpu state */
1502 #ifdef TARGET_X86_64
1503 cpu_load_efer(env
, 0);
1505 load_eflags(0, ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1506 env
->eip
= 0x00008000;
1507 cpu_x86_load_seg_cache(env
, R_CS
, (env
->smbase
>> 4) & 0xffff, env
->smbase
,
1509 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0xffffffff, 0);
1510 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0xffffffff, 0);
1511 cpu_x86_load_seg_cache(env
, R_SS
, 0, 0, 0xffffffff, 0);
1512 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0xffffffff, 0);
1513 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0xffffffff, 0);
1515 cpu_x86_update_cr0(env
,
1516 env
->cr
[0] & ~(CR0_PE_MASK
| CR0_EM_MASK
| CR0_TS_MASK
| CR0_PG_MASK
));
1517 cpu_x86_update_cr4(env
, 0);
1518 env
->dr
[7] = 0x00000400;
1519 CC_OP
= CC_OP_EFLAGS
;
1522 void helper_rsm(void)
1524 target_ulong sm_state
;
1528 sm_state
= env
->smbase
+ 0x8000;
1529 #ifdef TARGET_X86_64
1530 cpu_load_efer(env
, ldq_phys(sm_state
+ 0x7ed0));
1532 for(i
= 0; i
< 6; i
++) {
1533 offset
= 0x7e00 + i
* 16;
1534 cpu_x86_load_seg_cache(env
, i
,
1535 lduw_phys(sm_state
+ offset
),
1536 ldq_phys(sm_state
+ offset
+ 8),
1537 ldl_phys(sm_state
+ offset
+ 4),
1538 (lduw_phys(sm_state
+ offset
+ 2) & 0xf0ff) << 8);
1541 env
->gdt
.base
= ldq_phys(sm_state
+ 0x7e68);
1542 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7e64);
1544 env
->ldt
.selector
= lduw_phys(sm_state
+ 0x7e70);
1545 env
->ldt
.base
= ldq_phys(sm_state
+ 0x7e78);
1546 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7e74);
1547 env
->ldt
.flags
= (lduw_phys(sm_state
+ 0x7e72) & 0xf0ff) << 8;
1549 env
->idt
.base
= ldq_phys(sm_state
+ 0x7e88);
1550 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7e84);
1552 env
->tr
.selector
= lduw_phys(sm_state
+ 0x7e90);
1553 env
->tr
.base
= ldq_phys(sm_state
+ 0x7e98);
1554 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7e94);
1555 env
->tr
.flags
= (lduw_phys(sm_state
+ 0x7e92) & 0xf0ff) << 8;
1557 EAX
= ldq_phys(sm_state
+ 0x7ff8);
1558 ECX
= ldq_phys(sm_state
+ 0x7ff0);
1559 EDX
= ldq_phys(sm_state
+ 0x7fe8);
1560 EBX
= ldq_phys(sm_state
+ 0x7fe0);
1561 ESP
= ldq_phys(sm_state
+ 0x7fd8);
1562 EBP
= ldq_phys(sm_state
+ 0x7fd0);
1563 ESI
= ldq_phys(sm_state
+ 0x7fc8);
1564 EDI
= ldq_phys(sm_state
+ 0x7fc0);
1565 for(i
= 8; i
< 16; i
++)
1566 env
->regs
[i
] = ldq_phys(sm_state
+ 0x7ff8 - i
* 8);
1567 env
->eip
= ldq_phys(sm_state
+ 0x7f78);
1568 load_eflags(ldl_phys(sm_state
+ 0x7f70),
1569 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1570 env
->dr
[6] = ldl_phys(sm_state
+ 0x7f68);
1571 env
->dr
[7] = ldl_phys(sm_state
+ 0x7f60);
1573 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f48));
1574 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7f50));
1575 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7f58));
1577 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1578 if (val
& 0x20000) {
1579 env
->smbase
= ldl_phys(sm_state
+ 0x7f00) & ~0x7fff;
1582 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7ffc));
1583 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7ff8));
1584 load_eflags(ldl_phys(sm_state
+ 0x7ff4),
1585 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1586 env
->eip
= ldl_phys(sm_state
+ 0x7ff0);
1587 EDI
= ldl_phys(sm_state
+ 0x7fec);
1588 ESI
= ldl_phys(sm_state
+ 0x7fe8);
1589 EBP
= ldl_phys(sm_state
+ 0x7fe4);
1590 ESP
= ldl_phys(sm_state
+ 0x7fe0);
1591 EBX
= ldl_phys(sm_state
+ 0x7fdc);
1592 EDX
= ldl_phys(sm_state
+ 0x7fd8);
1593 ECX
= ldl_phys(sm_state
+ 0x7fd4);
1594 EAX
= ldl_phys(sm_state
+ 0x7fd0);
1595 env
->dr
[6] = ldl_phys(sm_state
+ 0x7fcc);
1596 env
->dr
[7] = ldl_phys(sm_state
+ 0x7fc8);
1598 env
->tr
.selector
= ldl_phys(sm_state
+ 0x7fc4) & 0xffff;
1599 env
->tr
.base
= ldl_phys(sm_state
+ 0x7f64);
1600 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7f60);
1601 env
->tr
.flags
= (ldl_phys(sm_state
+ 0x7f5c) & 0xf0ff) << 8;
1603 env
->ldt
.selector
= ldl_phys(sm_state
+ 0x7fc0) & 0xffff;
1604 env
->ldt
.base
= ldl_phys(sm_state
+ 0x7f80);
1605 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7f7c);
1606 env
->ldt
.flags
= (ldl_phys(sm_state
+ 0x7f78) & 0xf0ff) << 8;
1608 env
->gdt
.base
= ldl_phys(sm_state
+ 0x7f74);
1609 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7f70);
1611 env
->idt
.base
= ldl_phys(sm_state
+ 0x7f58);
1612 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7f54);
1614 for(i
= 0; i
< 6; i
++) {
1616 offset
= 0x7f84 + i
* 12;
1618 offset
= 0x7f2c + (i
- 3) * 12;
1619 cpu_x86_load_seg_cache(env
, i
,
1620 ldl_phys(sm_state
+ 0x7fa8 + i
* 4) & 0xffff,
1621 ldl_phys(sm_state
+ offset
+ 8),
1622 ldl_phys(sm_state
+ offset
+ 4),
1623 (ldl_phys(sm_state
+ offset
) & 0xf0ff) << 8);
1625 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f14));
1627 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1628 if (val
& 0x20000) {
1629 env
->smbase
= ldl_phys(sm_state
+ 0x7ef8) & ~0x7fff;
1632 CC_OP
= CC_OP_EFLAGS
;
1633 env
->hflags
&= ~HF_SMM_MASK
;
1634 cpu_smm_update(env
);
1636 qemu_log_mask(CPU_LOG_INT
, "SMM: after RSM\n");
1637 log_cpu_state_mask(CPU_LOG_INT
, env
, X86_DUMP_CCOP
);
1640 #endif /* !CONFIG_USER_ONLY */
1643 /* division, flags are undefined */
1645 void helper_divb_AL(target_ulong t0
)
1647 unsigned int num
, den
, q
, r
;
1649 num
= (EAX
& 0xffff);
1652 raise_exception(EXCP00_DIVZ
);
1656 raise_exception(EXCP00_DIVZ
);
1658 r
= (num
% den
) & 0xff;
1659 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
1662 void helper_idivb_AL(target_ulong t0
)
1669 raise_exception(EXCP00_DIVZ
);
1673 raise_exception(EXCP00_DIVZ
);
1675 r
= (num
% den
) & 0xff;
1676 EAX
= (EAX
& ~0xffff) | (r
<< 8) | q
;
1679 void helper_divw_AX(target_ulong t0
)
1681 unsigned int num
, den
, q
, r
;
1683 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1684 den
= (t0
& 0xffff);
1686 raise_exception(EXCP00_DIVZ
);
1690 raise_exception(EXCP00_DIVZ
);
1692 r
= (num
% den
) & 0xffff;
1693 EAX
= (EAX
& ~0xffff) | q
;
1694 EDX
= (EDX
& ~0xffff) | r
;
1697 void helper_idivw_AX(target_ulong t0
)
1701 num
= (EAX
& 0xffff) | ((EDX
& 0xffff) << 16);
1704 raise_exception(EXCP00_DIVZ
);
1707 if (q
!= (int16_t)q
)
1708 raise_exception(EXCP00_DIVZ
);
1710 r
= (num
% den
) & 0xffff;
1711 EAX
= (EAX
& ~0xffff) | q
;
1712 EDX
= (EDX
& ~0xffff) | r
;
1715 void helper_divl_EAX(target_ulong t0
)
1717 unsigned int den
, r
;
1720 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1723 raise_exception(EXCP00_DIVZ
);
1728 raise_exception(EXCP00_DIVZ
);
1733 void helper_idivl_EAX(target_ulong t0
)
1738 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1741 raise_exception(EXCP00_DIVZ
);
1745 if (q
!= (int32_t)q
)
1746 raise_exception(EXCP00_DIVZ
);
1753 /* XXX: exception */
1754 void helper_aam(int base
)
1760 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1764 void helper_aad(int base
)
1768 ah
= (EAX
>> 8) & 0xff;
1769 al
= ((ah
* base
) + al
) & 0xff;
1770 EAX
= (EAX
& ~0xffff) | al
;
1774 void helper_aaa(void)
1780 eflags
= helper_cc_compute_all(CC_OP
);
1783 ah
= (EAX
>> 8) & 0xff;
1785 icarry
= (al
> 0xf9);
1786 if (((al
& 0x0f) > 9 ) || af
) {
1787 al
= (al
+ 6) & 0x0f;
1788 ah
= (ah
+ 1 + icarry
) & 0xff;
1789 eflags
|= CC_C
| CC_A
;
1791 eflags
&= ~(CC_C
| CC_A
);
1794 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1798 void helper_aas(void)
1804 eflags
= helper_cc_compute_all(CC_OP
);
1807 ah
= (EAX
>> 8) & 0xff;
1810 if (((al
& 0x0f) > 9 ) || af
) {
1811 al
= (al
- 6) & 0x0f;
1812 ah
= (ah
- 1 - icarry
) & 0xff;
1813 eflags
|= CC_C
| CC_A
;
1815 eflags
&= ~(CC_C
| CC_A
);
1818 EAX
= (EAX
& ~0xffff) | al
| (ah
<< 8);
1822 void helper_daa(void)
1827 eflags
= helper_cc_compute_all(CC_OP
);
1833 if (((al
& 0x0f) > 9 ) || af
) {
1834 al
= (al
+ 6) & 0xff;
1837 if ((al
> 0x9f) || cf
) {
1838 al
= (al
+ 0x60) & 0xff;
1841 EAX
= (EAX
& ~0xff) | al
;
1842 /* well, speed is not an issue here, so we compute the flags by hand */
1843 eflags
|= (al
== 0) << 6; /* zf */
1844 eflags
|= parity_table
[al
]; /* pf */
1845 eflags
|= (al
& 0x80); /* sf */
1849 void helper_das(void)
1851 int al
, al1
, af
, cf
;
1854 eflags
= helper_cc_compute_all(CC_OP
);
1861 if (((al
& 0x0f) > 9 ) || af
) {
1865 al
= (al
- 6) & 0xff;
1867 if ((al1
> 0x99) || cf
) {
1868 al
= (al
- 0x60) & 0xff;
1871 EAX
= (EAX
& ~0xff) | al
;
1872 /* well, speed is not an issue here, so we compute the flags by hand */
1873 eflags
|= (al
== 0) << 6; /* zf */
1874 eflags
|= parity_table
[al
]; /* pf */
1875 eflags
|= (al
& 0x80); /* sf */
1879 void helper_into(int next_eip_addend
)
1882 eflags
= helper_cc_compute_all(CC_OP
);
1883 if (eflags
& CC_O
) {
1884 raise_interrupt(EXCP04_INTO
, 1, 0, next_eip_addend
);
1888 void helper_cmpxchg8b(target_ulong a0
)
1893 eflags
= helper_cc_compute_all(CC_OP
);
1895 if (d
== (((uint64_t)EDX
<< 32) | (uint32_t)EAX
)) {
1896 stq(a0
, ((uint64_t)ECX
<< 32) | (uint32_t)EBX
);
1899 /* always do the store */
1901 EDX
= (uint32_t)(d
>> 32);
#ifdef TARGET_X86_64
/* CMPXCHG16B: 16-byte compare-exchange; the operand must be 16-byte
   aligned or #GP is raised.  ZF reports whether the exchange happened. */
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif
1935 void helper_single_step(void)
1937 #ifndef CONFIG_USER_ONLY
1938 check_hw_breakpoints(env
, 1);
1939 env
->dr
[6] |= DR6_BS
;
1941 raise_exception(EXCP01_DB
);
1944 void helper_cpuid(void)
1946 uint32_t eax
, ebx
, ecx
, edx
;
1948 helper_svm_check_intercept_param(SVM_EXIT_CPUID
, 0);
1950 cpu_x86_cpuid(env
, (uint32_t)EAX
, (uint32_t)ECX
, &eax
, &ebx
, &ecx
, &edx
);
1957 void helper_enter_level(int level
, int data32
, target_ulong t1
)
1960 uint32_t esp_mask
, esp
, ebp
;
1962 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1963 ssp
= env
->segs
[R_SS
].base
;
1972 stl(ssp
+ (esp
& esp_mask
), ldl(ssp
+ (ebp
& esp_mask
)));
1975 stl(ssp
+ (esp
& esp_mask
), t1
);
1982 stw(ssp
+ (esp
& esp_mask
), lduw(ssp
+ (ebp
& esp_mask
)));
1985 stw(ssp
+ (esp
& esp_mask
), t1
);
#ifdef TARGET_X86_64
/* 64-bit ENTER with non-zero nesting level (flat addressing, no SS base). */
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
2020 void helper_lldt(int selector
)
2024 int index
, entry_limit
;
2028 if ((selector
& 0xfffc) == 0) {
2029 /* XXX: NULL selector case: invalid LDT */
2034 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2036 index
= selector
& ~7;
2037 #ifdef TARGET_X86_64
2038 if (env
->hflags
& HF_LMA_MASK
)
2043 if ((index
+ entry_limit
) > dt
->limit
)
2044 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2045 ptr
= dt
->base
+ index
;
2046 e1
= ldl_kernel(ptr
);
2047 e2
= ldl_kernel(ptr
+ 4);
2048 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
2049 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2050 if (!(e2
& DESC_P_MASK
))
2051 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2052 #ifdef TARGET_X86_64
2053 if (env
->hflags
& HF_LMA_MASK
) {
2055 e3
= ldl_kernel(ptr
+ 8);
2056 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2057 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
2061 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
2064 env
->ldt
.selector
= selector
;
2067 void helper_ltr(int selector
)
2071 int index
, type
, entry_limit
;
2075 if ((selector
& 0xfffc) == 0) {
2076 /* NULL selector case: invalid TR */
2082 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2084 index
= selector
& ~7;
2085 #ifdef TARGET_X86_64
2086 if (env
->hflags
& HF_LMA_MASK
)
2091 if ((index
+ entry_limit
) > dt
->limit
)
2092 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2093 ptr
= dt
->base
+ index
;
2094 e1
= ldl_kernel(ptr
);
2095 e2
= ldl_kernel(ptr
+ 4);
2096 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2097 if ((e2
& DESC_S_MASK
) ||
2098 (type
!= 1 && type
!= 9))
2099 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2100 if (!(e2
& DESC_P_MASK
))
2101 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2102 #ifdef TARGET_X86_64
2103 if (env
->hflags
& HF_LMA_MASK
) {
2105 e3
= ldl_kernel(ptr
+ 8);
2106 e4
= ldl_kernel(ptr
+ 12);
2107 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf)
2108 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2109 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2110 env
->tr
.base
|= (target_ulong
)e3
<< 32;
2114 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
2116 e2
|= DESC_TSS_BUSY_MASK
;
2117 stl_kernel(ptr
+ 4, e2
);
2119 env
->tr
.selector
= selector
;
2122 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2123 void helper_load_seg(int seg_reg
, int selector
)
2132 cpl
= env
->hflags
& HF_CPL_MASK
;
2133 if ((selector
& 0xfffc) == 0) {
2134 /* null selector case */
2136 #ifdef TARGET_X86_64
2137 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
2140 raise_exception_err(EXCP0D_GPF
, 0);
2141 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
2148 index
= selector
& ~7;
2149 if ((index
+ 7) > dt
->limit
)
2150 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2151 ptr
= dt
->base
+ index
;
2152 e1
= ldl_kernel(ptr
);
2153 e2
= ldl_kernel(ptr
+ 4);
2155 if (!(e2
& DESC_S_MASK
))
2156 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2158 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2159 if (seg_reg
== R_SS
) {
2160 /* must be writable segment */
2161 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
2162 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2163 if (rpl
!= cpl
|| dpl
!= cpl
)
2164 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2166 /* must be readable segment */
2167 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
)
2168 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2170 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2171 /* if not conforming code, test rights */
2172 if (dpl
< cpl
|| dpl
< rpl
)
2173 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2177 if (!(e2
& DESC_P_MASK
)) {
2178 if (seg_reg
== R_SS
)
2179 raise_exception_err(EXCP0C_STACK
, selector
& 0xfffc);
2181 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2184 /* set the access bit if not already set */
2185 if (!(e2
& DESC_A_MASK
)) {
2187 stl_kernel(ptr
+ 4, e2
);
2190 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
2191 get_seg_base(e1
, e2
),
2192 get_seg_limit(e1
, e2
),
2195 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2196 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
2201 /* protected mode jump */
2202 void helper_ljmp_protected(int new_cs
, target_ulong new_eip
,
2203 int next_eip_addend
)
2206 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
2207 target_ulong next_eip
;
2209 if ((new_cs
& 0xfffc) == 0)
2210 raise_exception_err(EXCP0D_GPF
, 0);
2211 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2212 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2213 cpl
= env
->hflags
& HF_CPL_MASK
;
2214 if (e2
& DESC_S_MASK
) {
2215 if (!(e2
& DESC_CS_MASK
))
2216 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2217 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2218 if (e2
& DESC_C_MASK
) {
2219 /* conforming code segment */
2221 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2223 /* non conforming code segment */
2226 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2228 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2230 if (!(e2
& DESC_P_MASK
))
2231 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2232 limit
= get_seg_limit(e1
, e2
);
2233 if (new_eip
> limit
&&
2234 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
))
2235 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2236 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2237 get_seg_base(e1
, e2
), limit
, e2
);
2240 /* jump to call or task gate */
2241 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2243 cpl
= env
->hflags
& HF_CPL_MASK
;
2244 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2246 case 1: /* 286 TSS */
2247 case 9: /* 386 TSS */
2248 case 5: /* task gate */
2249 if (dpl
< cpl
|| dpl
< rpl
)
2250 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2251 next_eip
= env
->eip
+ next_eip_addend
;
2252 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
);
2253 CC_OP
= CC_OP_EFLAGS
;
2255 case 4: /* 286 call gate */
2256 case 12: /* 386 call gate */
2257 if ((dpl
< cpl
) || (dpl
< rpl
))
2258 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2259 if (!(e2
& DESC_P_MASK
))
2260 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2262 new_eip
= (e1
& 0xffff);
2264 new_eip
|= (e2
& 0xffff0000);
2265 if (load_segment(&e1
, &e2
, gate_cs
) != 0)
2266 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2267 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2268 /* must be code segment */
2269 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
2270 (DESC_S_MASK
| DESC_CS_MASK
)))
2271 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2272 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
2273 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
)))
2274 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2275 if (!(e2
& DESC_P_MASK
))
2276 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2277 limit
= get_seg_limit(e1
, e2
);
2278 if (new_eip
> limit
)
2279 raise_exception_err(EXCP0D_GPF
, 0);
2280 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
2281 get_seg_base(e1
, e2
), limit
, e2
);
2285 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2291 /* real mode call */
2292 void helper_lcall_real(int new_cs
, target_ulong new_eip1
,
2293 int shift
, int next_eip
)
2296 uint32_t esp
, esp_mask
;
2301 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2302 ssp
= env
->segs
[R_SS
].base
;
2304 PUSHL(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2305 PUSHL(ssp
, esp
, esp_mask
, next_eip
);
2307 PUSHW(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2308 PUSHW(ssp
, esp
, esp_mask
, next_eip
);
2311 SET_ESP(esp
, esp_mask
);
2313 env
->segs
[R_CS
].selector
= new_cs
;
2314 env
->segs
[R_CS
].base
= (new_cs
<< 4);
2317 /* protected mode call */
2318 void helper_lcall_protected(int new_cs
, target_ulong new_eip
,
2319 int shift
, int next_eip_addend
)
2322 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
2323 uint32_t ss
= 0, ss_e1
= 0, ss_e2
= 0, sp
, type
, ss_dpl
, sp_mask
;
2324 uint32_t val
, limit
, old_sp_mask
;
2325 target_ulong ssp
, old_ssp
, next_eip
;
2327 next_eip
= env
->eip
+ next_eip_addend
;
2328 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs
, (uint32_t)new_eip
, shift
);
2329 LOG_PCALL_STATE(env
);
2330 if ((new_cs
& 0xfffc) == 0)
2331 raise_exception_err(EXCP0D_GPF
, 0);
2332 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2333 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2334 cpl
= env
->hflags
& HF_CPL_MASK
;
2335 LOG_PCALL("desc=%08x:%08x\n", e1
, e2
);
2336 if (e2
& DESC_S_MASK
) {
2337 if (!(e2
& DESC_CS_MASK
))
2338 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2339 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2340 if (e2
& DESC_C_MASK
) {
2341 /* conforming code segment */
2343 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2345 /* non conforming code segment */
2348 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2350 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2352 if (!(e2
& DESC_P_MASK
))
2353 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2355 #ifdef TARGET_X86_64
2356 /* XXX: check 16/32 bit cases in long mode */
2361 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
2362 PUSHQ(rsp
, next_eip
);
2363 /* from this point, not restartable */
2365 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2366 get_seg_base(e1
, e2
),
2367 get_seg_limit(e1
, e2
), e2
);
2373 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2374 ssp
= env
->segs
[R_SS
].base
;
2376 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2377 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2379 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2380 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2383 limit
= get_seg_limit(e1
, e2
);
2384 if (new_eip
> limit
)
2385 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2386 /* from this point, not restartable */
2387 SET_ESP(sp
, sp_mask
);
2388 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2389 get_seg_base(e1
, e2
), limit
, e2
);
2393 /* check gate type */
2394 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
2395 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2398 case 1: /* available 286 TSS */
2399 case 9: /* available 386 TSS */
2400 case 5: /* task gate */
2401 if (dpl
< cpl
|| dpl
< rpl
)
2402 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2403 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
2404 CC_OP
= CC_OP_EFLAGS
;
2406 case 4: /* 286 call gate */
2407 case 12: /* 386 call gate */
2410 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2415 if (dpl
< cpl
|| dpl
< rpl
)
2416 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2417 /* check valid bit */
2418 if (!(e2
& DESC_P_MASK
))
2419 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2420 selector
= e1
>> 16;
2421 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
2422 param_count
= e2
& 0x1f;
2423 if ((selector
& 0xfffc) == 0)
2424 raise_exception_err(EXCP0D_GPF
, 0);
2426 if (load_segment(&e1
, &e2
, selector
) != 0)
2427 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2428 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
2429 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2430 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2432 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2433 if (!(e2
& DESC_P_MASK
))
2434 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2436 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
2437 /* to inner privilege */
2438 get_ss_esp_from_tss(&ss
, &sp
, dpl
);
2439 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
"\n",
2440 ss
, sp
, param_count
, ESP
);
2441 if ((ss
& 0xfffc) == 0)
2442 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2443 if ((ss
& 3) != dpl
)
2444 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2445 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
2446 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2447 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2449 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2450 if (!(ss_e2
& DESC_S_MASK
) ||
2451 (ss_e2
& DESC_CS_MASK
) ||
2452 !(ss_e2
& DESC_W_MASK
))
2453 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2454 if (!(ss_e2
& DESC_P_MASK
))
2455 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2457 // push_size = ((param_count * 2) + 8) << shift;
2459 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2460 old_ssp
= env
->segs
[R_SS
].base
;
2462 sp_mask
= get_sp_mask(ss_e2
);
2463 ssp
= get_seg_base(ss_e1
, ss_e2
);
2465 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2466 PUSHL(ssp
, sp
, sp_mask
, ESP
);
2467 for(i
= param_count
- 1; i
>= 0; i
--) {
2468 val
= ldl_kernel(old_ssp
+ ((ESP
+ i
* 4) & old_sp_mask
));
2469 PUSHL(ssp
, sp
, sp_mask
, val
);
2472 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2473 PUSHW(ssp
, sp
, sp_mask
, ESP
);
2474 for(i
= param_count
- 1; i
>= 0; i
--) {
2475 val
= lduw_kernel(old_ssp
+ ((ESP
+ i
* 2) & old_sp_mask
));
2476 PUSHW(ssp
, sp
, sp_mask
, val
);
2481 /* to same privilege */
2483 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2484 ssp
= env
->segs
[R_SS
].base
;
2485 // push_size = (4 << shift);
2490 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2491 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2493 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2494 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2497 /* from this point, not restartable */
2500 ss
= (ss
& ~3) | dpl
;
2501 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
2503 get_seg_limit(ss_e1
, ss_e2
),
2507 selector
= (selector
& ~3) | dpl
;
2508 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
2509 get_seg_base(e1
, e2
),
2510 get_seg_limit(e1
, e2
),
2512 cpu_x86_set_cpl(env
, dpl
);
2513 SET_ESP(sp
, sp_mask
);
2518 /* real and vm86 mode iret */
2519 void helper_iret_real(int shift
)
2521 uint32_t sp
, new_cs
, new_eip
, new_eflags
, sp_mask
;
2525 sp_mask
= 0xffff; /* XXXX: use SS segment size ? */
2527 ssp
= env
->segs
[R_SS
].base
;
2530 POPL(ssp
, sp
, sp_mask
, new_eip
);
2531 POPL(ssp
, sp
, sp_mask
, new_cs
);
2533 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2536 POPW(ssp
, sp
, sp_mask
, new_eip
);
2537 POPW(ssp
, sp
, sp_mask
, new_cs
);
2538 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2540 ESP
= (ESP
& ~sp_mask
) | (sp
& sp_mask
);
2541 env
->segs
[R_CS
].selector
= new_cs
;
2542 env
->segs
[R_CS
].base
= (new_cs
<< 4);
2544 if (env
->eflags
& VM_MASK
)
2545 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| RF_MASK
| NT_MASK
;
2547 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| IOPL_MASK
| RF_MASK
| NT_MASK
;
2549 eflags_mask
&= 0xffff;
2550 load_eflags(new_eflags
, eflags_mask
);
2551 env
->hflags2
&= ~HF2_NMI_MASK
;
2554 static inline void validate_seg(int seg_reg
, int cpl
)
2559 /* XXX: on x86_64, we do not want to nullify FS and GS because
2560 they may still contain a valid base. I would be interested to
2561 know how a real x86_64 CPU behaves */
2562 if ((seg_reg
== R_FS
|| seg_reg
== R_GS
) &&
2563 (env
->segs
[seg_reg
].selector
& 0xfffc) == 0)
2566 e2
= env
->segs
[seg_reg
].flags
;
2567 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2568 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2569 /* data or non conforming code segment */
2571 cpu_x86_load_seg_cache(env
, seg_reg
, 0, 0, 0, 0);
2576 /* protected mode iret */
2577 static inline void helper_ret_protected(int shift
, int is_iret
, int addend
)
2579 uint32_t new_cs
, new_eflags
, new_ss
;
2580 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2581 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2582 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2583 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2585 #ifdef TARGET_X86_64
2590 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2592 ssp
= env
->segs
[R_SS
].base
;
2593 new_eflags
= 0; /* avoid warning */
2594 #ifdef TARGET_X86_64
2600 POPQ(sp
, new_eflags
);
2606 POPL(ssp
, sp
, sp_mask
, new_eip
);
2607 POPL(ssp
, sp
, sp_mask
, new_cs
);
2610 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2611 if (new_eflags
& VM_MASK
)
2612 goto return_to_vm86
;
2616 POPW(ssp
, sp
, sp_mask
, new_eip
);
2617 POPW(ssp
, sp
, sp_mask
, new_cs
);
2619 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2621 LOG_PCALL("lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2622 new_cs
, new_eip
, shift
, addend
);
2623 LOG_PCALL_STATE(env
);
2624 if ((new_cs
& 0xfffc) == 0)
2625 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2626 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2627 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2628 if (!(e2
& DESC_S_MASK
) ||
2629 !(e2
& DESC_CS_MASK
))
2630 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2631 cpl
= env
->hflags
& HF_CPL_MASK
;
2634 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2635 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2636 if (e2
& DESC_C_MASK
) {
2638 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2641 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2643 if (!(e2
& DESC_P_MASK
))
2644 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2647 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2648 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2649 /* return to same privilege level */
2650 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2651 get_seg_base(e1
, e2
),
2652 get_seg_limit(e1
, e2
),
2655 /* return to different privilege level */
2656 #ifdef TARGET_X86_64
2665 POPL(ssp
, sp
, sp_mask
, new_esp
);
2666 POPL(ssp
, sp
, sp_mask
, new_ss
);
2670 POPW(ssp
, sp
, sp_mask
, new_esp
);
2671 POPW(ssp
, sp
, sp_mask
, new_ss
);
2673 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2675 if ((new_ss
& 0xfffc) == 0) {
2676 #ifdef TARGET_X86_64
2677 /* NULL ss is allowed in long mode if cpl != 3*/
2678 /* XXX: test CS64 ? */
2679 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2680 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2682 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2683 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2684 DESC_W_MASK
| DESC_A_MASK
);
2685 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed ? */
2689 raise_exception_err(EXCP0D_GPF
, 0);
2692 if ((new_ss
& 3) != rpl
)
2693 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2694 if (load_segment(&ss_e1
, &ss_e2
, new_ss
) != 0)
2695 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2696 if (!(ss_e2
& DESC_S_MASK
) ||
2697 (ss_e2
& DESC_CS_MASK
) ||
2698 !(ss_e2
& DESC_W_MASK
))
2699 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2700 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2702 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2703 if (!(ss_e2
& DESC_P_MASK
))
2704 raise_exception_err(EXCP0B_NOSEG
, new_ss
& 0xfffc);
2705 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2706 get_seg_base(ss_e1
, ss_e2
),
2707 get_seg_limit(ss_e1
, ss_e2
),
2711 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2712 get_seg_base(e1
, e2
),
2713 get_seg_limit(e1
, e2
),
2715 cpu_x86_set_cpl(env
, rpl
);
2717 #ifdef TARGET_X86_64
2718 if (env
->hflags
& HF_CS64_MASK
)
2722 sp_mask
= get_sp_mask(ss_e2
);
2724 /* validate data segments */
2725 validate_seg(R_ES
, rpl
);
2726 validate_seg(R_DS
, rpl
);
2727 validate_seg(R_FS
, rpl
);
2728 validate_seg(R_GS
, rpl
);
2732 SET_ESP(sp
, sp_mask
);
2735 /* NOTE: 'cpl' is the _old_ CPL */
2736 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2738 eflags_mask
|= IOPL_MASK
;
2739 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2741 eflags_mask
|= IF_MASK
;
2743 eflags_mask
&= 0xffff;
2744 load_eflags(new_eflags
, eflags_mask
);
2749 POPL(ssp
, sp
, sp_mask
, new_esp
);
2750 POPL(ssp
, sp
, sp_mask
, new_ss
);
2751 POPL(ssp
, sp
, sp_mask
, new_es
);
2752 POPL(ssp
, sp
, sp_mask
, new_ds
);
2753 POPL(ssp
, sp
, sp_mask
, new_fs
);
2754 POPL(ssp
, sp
, sp_mask
, new_gs
);
2756 /* modify processor state */
2757 load_eflags(new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2758 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
| VIP_MASK
);
2759 load_seg_vm(R_CS
, new_cs
& 0xffff);
2760 cpu_x86_set_cpl(env
, 3);
2761 load_seg_vm(R_SS
, new_ss
& 0xffff);
2762 load_seg_vm(R_ES
, new_es
& 0xffff);
2763 load_seg_vm(R_DS
, new_ds
& 0xffff);
2764 load_seg_vm(R_FS
, new_fs
& 0xffff);
2765 load_seg_vm(R_GS
, new_gs
& 0xffff);
2767 env
->eip
= new_eip
& 0xffff;
2771 void helper_iret_protected(int shift
, int next_eip
)
2773 int tss_selector
, type
;
2776 /* specific case for TSS */
2777 if (env
->eflags
& NT_MASK
) {
2778 #ifdef TARGET_X86_64
2779 if (env
->hflags
& HF_LMA_MASK
)
2780 raise_exception_err(EXCP0D_GPF
, 0);
2782 tss_selector
= lduw_kernel(env
->tr
.base
+ 0);
2783 if (tss_selector
& 4)
2784 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2785 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
2786 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2787 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2788 /* NOTE: we check both segment and busy TSS */
2790 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2791 switch_tss(tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
);
2793 helper_ret_protected(shift
, 1, 0);
2795 env
->hflags2
&= ~HF2_NMI_MASK
;
/* LRET in protected mode: far return, optionally releasing 'addend'
   bytes of parameters from the stack.  Thin wrapper around the common
   protected-mode return path with is_iret = 0. */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
2803 void helper_sysenter(void)
2805 if (env
->sysenter_cs
== 0) {
2806 raise_exception_err(EXCP0D_GPF
, 0);
2808 env
->eflags
&= ~(VM_MASK
| IF_MASK
| RF_MASK
);
2809 cpu_x86_set_cpl(env
, 0);
2811 #ifdef TARGET_X86_64
2812 if (env
->hflags
& HF_LMA_MASK
) {
2813 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2815 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2817 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
2821 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2823 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2825 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2827 cpu_x86_load_seg_cache(env
, R_SS
, (env
->sysenter_cs
+ 8) & 0xfffc,
2829 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2831 DESC_W_MASK
| DESC_A_MASK
);
2832 ESP
= env
->sysenter_esp
;
2833 EIP
= env
->sysenter_eip
;
2836 void helper_sysexit(int dflag
)
2840 cpl
= env
->hflags
& HF_CPL_MASK
;
2841 if (env
->sysenter_cs
== 0 || cpl
!= 0) {
2842 raise_exception_err(EXCP0D_GPF
, 0);
2844 cpu_x86_set_cpl(env
, 3);
2845 #ifdef TARGET_X86_64
2847 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 32) & 0xfffc) | 3,
2849 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2850 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2851 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
2852 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 40) & 0xfffc) | 3,
2854 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2855 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2856 DESC_W_MASK
| DESC_A_MASK
);
2860 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 16) & 0xfffc) | 3,
2862 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2863 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2864 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2865 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 24) & 0xfffc) | 3,
2867 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2868 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2869 DESC_W_MASK
| DESC_A_MASK
);
2875 #if defined(CONFIG_USER_ONLY)
2876 target_ulong
helper_read_crN(int reg
)
2881 void helper_write_crN(int reg
, target_ulong t0
)
2885 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2889 target_ulong
helper_read_crN(int reg
)
2893 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0
+ reg
, 0);
2899 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2900 val
= cpu_get_apic_tpr(env
->apic_state
);
2909 void helper_write_crN(int reg
, target_ulong t0
)
2911 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0
+ reg
, 0);
2914 cpu_x86_update_cr0(env
, t0
);
2917 cpu_x86_update_cr3(env
, t0
);
2920 cpu_x86_update_cr4(env
, t0
);
2923 if (!(env
->hflags2
& HF2_VINTR_MASK
)) {
2924 cpu_set_apic_tpr(env
->apic_state
, t0
);
2926 env
->v_tpr
= t0
& 0x0f;
2934 void helper_movl_drN_T0(int reg
, target_ulong t0
)
2939 hw_breakpoint_remove(env
, reg
);
2941 hw_breakpoint_insert(env
, reg
);
2942 } else if (reg
== 7) {
2943 for (i
= 0; i
< 4; i
++)
2944 hw_breakpoint_remove(env
, i
);
2946 for (i
= 0; i
< 4; i
++)
2947 hw_breakpoint_insert(env
, i
);
2953 void helper_lmsw(target_ulong t0
)
2955 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2956 if already set to one. */
2957 t0
= (env
->cr
[0] & ~0xe) | (t0
& 0xf);
2958 helper_write_crN(0, t0
);
2961 void helper_clts(void)
2963 env
->cr
[0] &= ~CR0_TS_MASK
;
2964 env
->hflags
&= ~HF_TS_MASK
;
2967 void helper_invlpg(target_ulong addr
)
2969 helper_svm_check_intercept_param(SVM_EXIT_INVLPG
, 0);
2970 tlb_flush_page(env
, addr
);
2973 void helper_rdtsc(void)
2977 if ((env
->cr
[4] & CR4_TSD_MASK
) && ((env
->hflags
& HF_CPL_MASK
) != 0)) {
2978 raise_exception(EXCP0D_GPF
);
2980 helper_svm_check_intercept_param(SVM_EXIT_RDTSC
, 0);
2982 val
= cpu_get_tsc(env
) + env
->tsc_offset
;
2983 EAX
= (uint32_t)(val
);
2984 EDX
= (uint32_t)(val
>> 32);
2987 void helper_rdtscp(void)
2990 ECX
= (uint32_t)(env
->tsc_aux
);
2993 void helper_rdpmc(void)
2995 if ((env
->cr
[4] & CR4_PCE_MASK
) && ((env
->hflags
& HF_CPL_MASK
) != 0)) {
2996 raise_exception(EXCP0D_GPF
);
2998 helper_svm_check_intercept_param(SVM_EXIT_RDPMC
, 0);
3000 /* currently unimplemented */
3001 raise_exception_err(EXCP06_ILLOP
, 0);
3004 #if defined(CONFIG_USER_ONLY)
3005 void helper_wrmsr(void)
3009 void helper_rdmsr(void)
3013 void helper_wrmsr(void)
3017 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 1);
3019 val
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
3021 switch((uint32_t)ECX
) {
3022 case MSR_IA32_SYSENTER_CS
:
3023 env
->sysenter_cs
= val
& 0xffff;
3025 case MSR_IA32_SYSENTER_ESP
:
3026 env
->sysenter_esp
= val
;
3028 case MSR_IA32_SYSENTER_EIP
:
3029 env
->sysenter_eip
= val
;
3031 case MSR_IA32_APICBASE
:
3032 cpu_set_apic_base(env
->apic_state
, val
);
3036 uint64_t update_mask
;
3038 if (env
->cpuid_ext2_features
& CPUID_EXT2_SYSCALL
)
3039 update_mask
|= MSR_EFER_SCE
;
3040 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
)
3041 update_mask
|= MSR_EFER_LME
;
3042 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3043 update_mask
|= MSR_EFER_FFXSR
;
3044 if (env
->cpuid_ext2_features
& CPUID_EXT2_NX
)
3045 update_mask
|= MSR_EFER_NXE
;
3046 if (env
->cpuid_ext3_features
& CPUID_EXT3_SVM
)
3047 update_mask
|= MSR_EFER_SVME
;
3048 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
3049 update_mask
|= MSR_EFER_FFXSR
;
3050 cpu_load_efer(env
, (env
->efer
& ~update_mask
) |
3051 (val
& update_mask
));
3060 case MSR_VM_HSAVE_PA
:
3061 env
->vm_hsave
= val
;
3063 #ifdef TARGET_X86_64
3074 env
->segs
[R_FS
].base
= val
;
3077 env
->segs
[R_GS
].base
= val
;
3079 case MSR_KERNELGSBASE
:
3080 env
->kernelgsbase
= val
;
3083 case MSR_MTRRphysBase(0):
3084 case MSR_MTRRphysBase(1):
3085 case MSR_MTRRphysBase(2):
3086 case MSR_MTRRphysBase(3):
3087 case MSR_MTRRphysBase(4):
3088 case MSR_MTRRphysBase(5):
3089 case MSR_MTRRphysBase(6):
3090 case MSR_MTRRphysBase(7):
3091 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
= val
;
3093 case MSR_MTRRphysMask(0):
3094 case MSR_MTRRphysMask(1):
3095 case MSR_MTRRphysMask(2):
3096 case MSR_MTRRphysMask(3):
3097 case MSR_MTRRphysMask(4):
3098 case MSR_MTRRphysMask(5):
3099 case MSR_MTRRphysMask(6):
3100 case MSR_MTRRphysMask(7):
3101 env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
= val
;
3103 case MSR_MTRRfix64K_00000
:
3104 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix64K_00000
] = val
;
3106 case MSR_MTRRfix16K_80000
:
3107 case MSR_MTRRfix16K_A0000
:
3108 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1] = val
;
3110 case MSR_MTRRfix4K_C0000
:
3111 case MSR_MTRRfix4K_C8000
:
3112 case MSR_MTRRfix4K_D0000
:
3113 case MSR_MTRRfix4K_D8000
:
3114 case MSR_MTRRfix4K_E0000
:
3115 case MSR_MTRRfix4K_E8000
:
3116 case MSR_MTRRfix4K_F0000
:
3117 case MSR_MTRRfix4K_F8000
:
3118 env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3] = val
;
3120 case MSR_MTRRdefType
:
3121 env
->mtrr_deftype
= val
;
3123 case MSR_MCG_STATUS
:
3124 env
->mcg_status
= val
;
3127 if ((env
->mcg_cap
& MCG_CTL_P
)
3128 && (val
== 0 || val
== ~(uint64_t)0))
3135 if ((uint32_t)ECX
>= MSR_MC0_CTL
3136 && (uint32_t)ECX
< MSR_MC0_CTL
+ (4 * env
->mcg_cap
& 0xff)) {
3137 uint32_t offset
= (uint32_t)ECX
- MSR_MC0_CTL
;
3138 if ((offset
& 0x3) != 0
3139 || (val
== 0 || val
== ~(uint64_t)0))
3140 env
->mce_banks
[offset
] = val
;
3143 /* XXX: exception ? */
3148 void helper_rdmsr(void)
3152 helper_svm_check_intercept_param(SVM_EXIT_MSR
, 0);
3154 switch((uint32_t)ECX
) {
3155 case MSR_IA32_SYSENTER_CS
:
3156 val
= env
->sysenter_cs
;
3158 case MSR_IA32_SYSENTER_ESP
:
3159 val
= env
->sysenter_esp
;
3161 case MSR_IA32_SYSENTER_EIP
:
3162 val
= env
->sysenter_eip
;
3164 case MSR_IA32_APICBASE
:
3165 val
= cpu_get_apic_base(env
->apic_state
);
3176 case MSR_VM_HSAVE_PA
:
3177 val
= env
->vm_hsave
;
3179 case MSR_IA32_PERF_STATUS
:
3180 /* tsc_increment_by_tick */
3182 /* CPU multiplier */
3183 val
|= (((uint64_t)4ULL) << 40);
3185 #ifdef TARGET_X86_64
3196 val
= env
->segs
[R_FS
].base
;
3199 val
= env
->segs
[R_GS
].base
;
3201 case MSR_KERNELGSBASE
:
3202 val
= env
->kernelgsbase
;
3208 case MSR_MTRRphysBase(0):
3209 case MSR_MTRRphysBase(1):
3210 case MSR_MTRRphysBase(2):
3211 case MSR_MTRRphysBase(3):
3212 case MSR_MTRRphysBase(4):
3213 case MSR_MTRRphysBase(5):
3214 case MSR_MTRRphysBase(6):
3215 case MSR_MTRRphysBase(7):
3216 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysBase(0)) / 2].base
;
3218 case MSR_MTRRphysMask(0):
3219 case MSR_MTRRphysMask(1):
3220 case MSR_MTRRphysMask(2):
3221 case MSR_MTRRphysMask(3):
3222 case MSR_MTRRphysMask(4):
3223 case MSR_MTRRphysMask(5):
3224 case MSR_MTRRphysMask(6):
3225 case MSR_MTRRphysMask(7):
3226 val
= env
->mtrr_var
[((uint32_t)ECX
- MSR_MTRRphysMask(0)) / 2].mask
;
3228 case MSR_MTRRfix64K_00000
:
3229 val
= env
->mtrr_fixed
[0];
3231 case MSR_MTRRfix16K_80000
:
3232 case MSR_MTRRfix16K_A0000
:
3233 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix16K_80000
+ 1];
3235 case MSR_MTRRfix4K_C0000
:
3236 case MSR_MTRRfix4K_C8000
:
3237 case MSR_MTRRfix4K_D0000
:
3238 case MSR_MTRRfix4K_D8000
:
3239 case MSR_MTRRfix4K_E0000
:
3240 case MSR_MTRRfix4K_E8000
:
3241 case MSR_MTRRfix4K_F0000
:
3242 case MSR_MTRRfix4K_F8000
:
3243 val
= env
->mtrr_fixed
[(uint32_t)ECX
- MSR_MTRRfix4K_C0000
+ 3];
3245 case MSR_MTRRdefType
:
3246 val
= env
->mtrr_deftype
;
3249 if (env
->cpuid_features
& CPUID_MTRR
)
3250 val
= MSR_MTRRcap_VCNT
| MSR_MTRRcap_FIXRANGE_SUPPORT
| MSR_MTRRcap_WC_SUPPORTED
;
3252 /* XXX: exception ? */
3259 if (env
->mcg_cap
& MCG_CTL_P
)
3264 case MSR_MCG_STATUS
:
3265 val
= env
->mcg_status
;
3268 if ((uint32_t)ECX
>= MSR_MC0_CTL
3269 && (uint32_t)ECX
< MSR_MC0_CTL
+ (4 * env
->mcg_cap
& 0xff)) {
3270 uint32_t offset
= (uint32_t)ECX
- MSR_MC0_CTL
;
3271 val
= env
->mce_banks
[offset
];
3274 /* XXX: exception ? */
3278 EAX
= (uint32_t)(val
);
3279 EDX
= (uint32_t)(val
>> 32);
3283 target_ulong
helper_lsl(target_ulong selector1
)
3286 uint32_t e1
, e2
, eflags
, selector
;
3287 int rpl
, dpl
, cpl
, type
;
3289 selector
= selector1
& 0xffff;
3290 eflags
= helper_cc_compute_all(CC_OP
);
3291 if ((selector
& 0xfffc) == 0)
3293 if (load_segment(&e1
, &e2
, selector
) != 0)
3296 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3297 cpl
= env
->hflags
& HF_CPL_MASK
;
3298 if (e2
& DESC_S_MASK
) {
3299 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3302 if (dpl
< cpl
|| dpl
< rpl
)
3306 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3317 if (dpl
< cpl
|| dpl
< rpl
) {
3319 CC_SRC
= eflags
& ~CC_Z
;
3323 limit
= get_seg_limit(e1
, e2
);
3324 CC_SRC
= eflags
| CC_Z
;
3328 target_ulong
helper_lar(target_ulong selector1
)
3330 uint32_t e1
, e2
, eflags
, selector
;
3331 int rpl
, dpl
, cpl
, type
;
3333 selector
= selector1
& 0xffff;
3334 eflags
= helper_cc_compute_all(CC_OP
);
3335 if ((selector
& 0xfffc) == 0)
3337 if (load_segment(&e1
, &e2
, selector
) != 0)
3340 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3341 cpl
= env
->hflags
& HF_CPL_MASK
;
3342 if (e2
& DESC_S_MASK
) {
3343 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
3346 if (dpl
< cpl
|| dpl
< rpl
)
3350 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
3364 if (dpl
< cpl
|| dpl
< rpl
) {
3366 CC_SRC
= eflags
& ~CC_Z
;
3370 CC_SRC
= eflags
| CC_Z
;
3371 return e2
& 0x00f0ff00;
3374 void helper_verr(target_ulong selector1
)
3376 uint32_t e1
, e2
, eflags
, selector
;
3379 selector
= selector1
& 0xffff;
3380 eflags
= helper_cc_compute_all(CC_OP
);
3381 if ((selector
& 0xfffc) == 0)
3383 if (load_segment(&e1
, &e2
, selector
) != 0)
3385 if (!(e2
& DESC_S_MASK
))
3388 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3389 cpl
= env
->hflags
& HF_CPL_MASK
;
3390 if (e2
& DESC_CS_MASK
) {
3391 if (!(e2
& DESC_R_MASK
))
3393 if (!(e2
& DESC_C_MASK
)) {
3394 if (dpl
< cpl
|| dpl
< rpl
)
3398 if (dpl
< cpl
|| dpl
< rpl
) {
3400 CC_SRC
= eflags
& ~CC_Z
;
3404 CC_SRC
= eflags
| CC_Z
;
3407 void helper_verw(target_ulong selector1
)
3409 uint32_t e1
, e2
, eflags
, selector
;
3412 selector
= selector1
& 0xffff;
3413 eflags
= helper_cc_compute_all(CC_OP
);
3414 if ((selector
& 0xfffc) == 0)
3416 if (load_segment(&e1
, &e2
, selector
) != 0)
3418 if (!(e2
& DESC_S_MASK
))
3421 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3422 cpl
= env
->hflags
& HF_CPL_MASK
;
3423 if (e2
& DESC_CS_MASK
) {
3426 if (dpl
< cpl
|| dpl
< rpl
)
3428 if (!(e2
& DESC_W_MASK
)) {
3430 CC_SRC
= eflags
& ~CC_Z
;
3434 CC_SRC
= eflags
| CC_Z
;
3437 /* x87 FPU helpers */
3439 static inline double floatx80_to_double(floatx80 a
)
3446 u
.f64
= floatx80_to_float64(a
, &env
->fp_status
);
3450 static inline floatx80
double_to_floatx80(double a
)
3458 return float64_to_floatx80(u
.f64
, &env
->fp_status
);
3461 static void fpu_set_exception(int mask
)
3464 if (env
->fpus
& (~env
->fpuc
& FPUC_EM
))
3465 env
->fpus
|= FPUS_SE
| FPUS_B
;
3468 static inline floatx80
helper_fdiv(floatx80 a
, floatx80 b
)
3470 if (floatx80_is_zero(b
)) {
3471 fpu_set_exception(FPUS_ZE
);
3473 return floatx80_div(a
, b
, &env
->fp_status
);
3476 static void fpu_raise_exception(void)
3478 if (env
->cr
[0] & CR0_NE_MASK
) {
3479 raise_exception(EXCP10_COPR
);
3481 #if !defined(CONFIG_USER_ONLY)
3488 void helper_flds_FT0(uint32_t val
)
3495 FT0
= float32_to_floatx80(u
.f
, &env
->fp_status
);
3498 void helper_fldl_FT0(uint64_t val
)
3505 FT0
= float64_to_floatx80(u
.f
, &env
->fp_status
);
3508 void helper_fildl_FT0(int32_t val
)
3510 FT0
= int32_to_floatx80(val
, &env
->fp_status
);
3513 void helper_flds_ST0(uint32_t val
)
3520 new_fpstt
= (env
->fpstt
- 1) & 7;
3522 env
->fpregs
[new_fpstt
].d
= float32_to_floatx80(u
.f
, &env
->fp_status
);
3523 env
->fpstt
= new_fpstt
;
3524 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3527 void helper_fldl_ST0(uint64_t val
)
3534 new_fpstt
= (env
->fpstt
- 1) & 7;
3536 env
->fpregs
[new_fpstt
].d
= float64_to_floatx80(u
.f
, &env
->fp_status
);
3537 env
->fpstt
= new_fpstt
;
3538 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3541 void helper_fildl_ST0(int32_t val
)
3544 new_fpstt
= (env
->fpstt
- 1) & 7;
3545 env
->fpregs
[new_fpstt
].d
= int32_to_floatx80(val
, &env
->fp_status
);
3546 env
->fpstt
= new_fpstt
;
3547 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3550 void helper_fildll_ST0(int64_t val
)
3553 new_fpstt
= (env
->fpstt
- 1) & 7;
3554 env
->fpregs
[new_fpstt
].d
= int64_to_floatx80(val
, &env
->fp_status
);
3555 env
->fpstt
= new_fpstt
;
3556 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3559 uint32_t helper_fsts_ST0(void)
3565 u
.f
= floatx80_to_float32(ST0
, &env
->fp_status
);
3569 uint64_t helper_fstl_ST0(void)
3575 u
.f
= floatx80_to_float64(ST0
, &env
->fp_status
);
3579 int32_t helper_fist_ST0(void)
3582 val
= floatx80_to_int32(ST0
, &env
->fp_status
);
3583 if (val
!= (int16_t)val
)
3588 int32_t helper_fistl_ST0(void)
3591 val
= floatx80_to_int32(ST0
, &env
->fp_status
);
3595 int64_t helper_fistll_ST0(void)
3598 val
= floatx80_to_int64(ST0
, &env
->fp_status
);
3602 int32_t helper_fistt_ST0(void)
3605 val
= floatx80_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3606 if (val
!= (int16_t)val
)
3611 int32_t helper_fisttl_ST0(void)
3614 val
= floatx80_to_int32_round_to_zero(ST0
, &env
->fp_status
);
3618 int64_t helper_fisttll_ST0(void)
3621 val
= floatx80_to_int64_round_to_zero(ST0
, &env
->fp_status
);
3625 void helper_fldt_ST0(target_ulong ptr
)
3628 new_fpstt
= (env
->fpstt
- 1) & 7;
3629 env
->fpregs
[new_fpstt
].d
= helper_fldt(ptr
);
3630 env
->fpstt
= new_fpstt
;
3631 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3634 void helper_fstt_ST0(target_ulong ptr
)
3636 helper_fstt(ST0
, ptr
);
3639 void helper_fpush(void)
3644 void helper_fpop(void)
3649 void helper_fdecstp(void)
3651 env
->fpstt
= (env
->fpstt
- 1) & 7;
3652 env
->fpus
&= (~0x4700);
3655 void helper_fincstp(void)
3657 env
->fpstt
= (env
->fpstt
+ 1) & 7;
3658 env
->fpus
&= (~0x4700);
3663 void helper_ffree_STN(int st_index
)
3665 env
->fptags
[(env
->fpstt
+ st_index
) & 7] = 1;
3668 void helper_fmov_ST0_FT0(void)
3673 void helper_fmov_FT0_STN(int st_index
)
3678 void helper_fmov_ST0_STN(int st_index
)
3683 void helper_fmov_STN_ST0(int st_index
)
3688 void helper_fxchg_ST0_STN(int st_index
)
3696 /* FPU operations */
3698 static const int fcom_ccval
[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3700 void helper_fcom_ST0_FT0(void)
3704 ret
= floatx80_compare(ST0
, FT0
, &env
->fp_status
);
3705 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3708 void helper_fucom_ST0_FT0(void)
3712 ret
= floatx80_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3713 env
->fpus
= (env
->fpus
& ~0x4500) | fcom_ccval
[ret
+ 1];
3716 static const int fcomi_ccval
[4] = {CC_C
, CC_Z
, 0, CC_Z
| CC_P
| CC_C
};
3718 void helper_fcomi_ST0_FT0(void)
3723 ret
= floatx80_compare(ST0
, FT0
, &env
->fp_status
);
3724 eflags
= helper_cc_compute_all(CC_OP
);
3725 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3729 void helper_fucomi_ST0_FT0(void)
3734 ret
= floatx80_compare_quiet(ST0
, FT0
, &env
->fp_status
);
3735 eflags
= helper_cc_compute_all(CC_OP
);
3736 eflags
= (eflags
& ~(CC_Z
| CC_P
| CC_C
)) | fcomi_ccval
[ret
+ 1];
3740 void helper_fadd_ST0_FT0(void)
3742 ST0
= floatx80_add(ST0
, FT0
, &env
->fp_status
);
3745 void helper_fmul_ST0_FT0(void)
3747 ST0
= floatx80_mul(ST0
, FT0
, &env
->fp_status
);
3750 void helper_fsub_ST0_FT0(void)
3752 ST0
= floatx80_sub(ST0
, FT0
, &env
->fp_status
);
3755 void helper_fsubr_ST0_FT0(void)
3757 ST0
= floatx80_sub(FT0
, ST0
, &env
->fp_status
);
3760 void helper_fdiv_ST0_FT0(void)
3762 ST0
= helper_fdiv(ST0
, FT0
);
3765 void helper_fdivr_ST0_FT0(void)
3767 ST0
= helper_fdiv(FT0
, ST0
);
3770 /* fp operations between STN and ST0 */
3772 void helper_fadd_STN_ST0(int st_index
)
3774 ST(st_index
) = floatx80_add(ST(st_index
), ST0
, &env
->fp_status
);
3777 void helper_fmul_STN_ST0(int st_index
)
3779 ST(st_index
) = floatx80_mul(ST(st_index
), ST0
, &env
->fp_status
);
3782 void helper_fsub_STN_ST0(int st_index
)
3784 ST(st_index
) = floatx80_sub(ST(st_index
), ST0
, &env
->fp_status
);
3787 void helper_fsubr_STN_ST0(int st_index
)
3789 ST(st_index
) = floatx80_sub(ST0
, ST(st_index
), &env
->fp_status
);
3792 void helper_fdiv_STN_ST0(int st_index
)
3796 *p
= helper_fdiv(*p
, ST0
);
3799 void helper_fdivr_STN_ST0(int st_index
)
3803 *p
= helper_fdiv(ST0
, *p
);
3806 /* misc FPU operations */
3807 void helper_fchs_ST0(void)
3809 ST0
= floatx80_chs(ST0
);
3812 void helper_fabs_ST0(void)
3814 ST0
= floatx80_abs(ST0
);
3817 void helper_fld1_ST0(void)
3822 void helper_fldl2t_ST0(void)
3827 void helper_fldl2e_ST0(void)
3832 void helper_fldpi_ST0(void)
3837 void helper_fldlg2_ST0(void)
3842 void helper_fldln2_ST0(void)
3847 void helper_fldz_ST0(void)
3852 void helper_fldz_FT0(void)
3857 uint32_t helper_fnstsw(void)
3859 return (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
3862 uint32_t helper_fnstcw(void)
3867 static void update_fp_status(void)
3871 /* set rounding mode */
3872 switch(env
->fpuc
& RC_MASK
) {
3875 rnd_type
= float_round_nearest_even
;
3878 rnd_type
= float_round_down
;
3881 rnd_type
= float_round_up
;
3884 rnd_type
= float_round_to_zero
;
3887 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
3888 switch((env
->fpuc
>> 8) & 3) {
3900 set_floatx80_rounding_precision(rnd_type
, &env
->fp_status
);
3903 void helper_fldcw(uint32_t val
)
3909 void helper_fclex(void)
3911 env
->fpus
&= 0x7f00;
3914 void helper_fwait(void)
3916 if (env
->fpus
& FPUS_SE
)
3917 fpu_raise_exception();
3920 void helper_fninit(void)
3937 void helper_fbld_ST0(target_ulong ptr
)
3945 for(i
= 8; i
>= 0; i
--) {
3947 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
3949 tmp
= int64_to_floatx80(val
, &env
->fp_status
);
3950 if (ldub(ptr
+ 9) & 0x80) {
3957 void helper_fbst_ST0(target_ulong ptr
)
3960 target_ulong mem_ref
, mem_end
;
3963 val
= floatx80_to_int64(ST0
, &env
->fp_status
);
3965 mem_end
= mem_ref
+ 9;
3972 while (mem_ref
< mem_end
) {
3977 v
= ((v
/ 10) << 4) | (v
% 10);
3980 while (mem_ref
< mem_end
) {
3985 void helper_f2xm1(void)
3987 double val
= floatx80_to_double(ST0
);
3988 val
= pow(2.0, val
) - 1.0;
3989 ST0
= double_to_floatx80(val
);
3992 void helper_fyl2x(void)
3994 double fptemp
= floatx80_to_double(ST0
);
3997 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
3998 fptemp
*= floatx80_to_double(ST1
);
3999 ST1
= double_to_floatx80(fptemp
);
4002 env
->fpus
&= (~0x4700);
4007 void helper_fptan(void)
4009 double fptemp
= floatx80_to_double(ST0
);
4011 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4014 fptemp
= tan(fptemp
);
4015 ST0
= double_to_floatx80(fptemp
);
4018 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4019 /* the above code is for |arg| < 2**52 only */
4023 void helper_fpatan(void)
4025 double fptemp
, fpsrcop
;
4027 fpsrcop
= floatx80_to_double(ST1
);
4028 fptemp
= floatx80_to_double(ST0
);
4029 ST1
= double_to_floatx80(atan2(fpsrcop
, fptemp
));
4033 void helper_fxtract(void)
4039 if (floatx80_is_zero(ST0
)) {
4040 /* Easy way to generate -inf and raising division by 0 exception */
4041 ST0
= floatx80_div(floatx80_chs(floatx80_one
), floatx80_zero
, &env
->fp_status
);
4047 expdif
= EXPD(temp
) - EXPBIAS
;
4048 /*DP exponent bias*/
4049 ST0
= int32_to_floatx80(expdif
, &env
->fp_status
);
4056 void helper_fprem1(void)
4058 double st0
, st1
, dblq
, fpsrcop
, fptemp
;
4059 CPU_LDoubleU fpsrcop1
, fptemp1
;
4061 signed long long int q
;
4063 st0
= floatx80_to_double(ST0
);
4064 st1
= floatx80_to_double(ST1
);
4066 if (isinf(st0
) || isnan(st0
) || isnan(st1
) || (st1
== 0.0)) {
4067 ST0
= double_to_floatx80(0.0 / 0.0); /* NaN */
4068 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4076 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4079 /* optimisation? taken from the AMD docs */
4080 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4081 /* ST0 is unchanged */
4086 dblq
= fpsrcop
/ fptemp
;
4087 /* round dblq towards nearest integer */
4089 st0
= fpsrcop
- fptemp
* dblq
;
4091 /* convert dblq to q by truncating towards zero */
4093 q
= (signed long long int)(-dblq
);
4095 q
= (signed long long int)dblq
;
4097 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4098 /* (C0,C3,C1) <-- (q2,q1,q0) */
4099 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4100 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4101 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4103 env
->fpus
|= 0x400; /* C2 <-- 1 */
4104 fptemp
= pow(2.0, expdif
- 50);
4105 fpsrcop
= (st0
/ st1
) / fptemp
;
4106 /* fpsrcop = integer obtained by chopping */
4107 fpsrcop
= (fpsrcop
< 0.0) ?
4108 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4109 st0
-= (st1
* fpsrcop
* fptemp
);
4111 ST0
= double_to_floatx80(st0
);
4114 void helper_fprem(void)
4116 double st0
, st1
, dblq
, fpsrcop
, fptemp
;
4117 CPU_LDoubleU fpsrcop1
, fptemp1
;
4119 signed long long int q
;
4121 st0
= floatx80_to_double(ST0
);
4122 st1
= floatx80_to_double(ST1
);
4124 if (isinf(st0
) || isnan(st0
) || isnan(st1
) || (st1
== 0.0)) {
4125 ST0
= double_to_floatx80(0.0 / 0.0); /* NaN */
4126 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4134 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
4137 /* optimisation? taken from the AMD docs */
4138 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4139 /* ST0 is unchanged */
4143 if ( expdif
< 53 ) {
4144 dblq
= fpsrcop
/*ST0*/ / fptemp
/*ST1*/;
4145 /* round dblq towards zero */
4146 dblq
= (dblq
< 0.0) ? ceil(dblq
) : floor(dblq
);
4147 st0
= fpsrcop
/*ST0*/ - fptemp
* dblq
;
4149 /* convert dblq to q by truncating towards zero */
4151 q
= (signed long long int)(-dblq
);
4153 q
= (signed long long int)dblq
;
4155 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4156 /* (C0,C3,C1) <-- (q2,q1,q0) */
4157 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
4158 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
4159 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
4161 int N
= 32 + (expdif
% 32); /* as per AMD docs */
4162 env
->fpus
|= 0x400; /* C2 <-- 1 */
4163 fptemp
= pow(2.0, (double)(expdif
- N
));
4164 fpsrcop
= (st0
/ st1
) / fptemp
;
4165 /* fpsrcop = integer obtained by chopping */
4166 fpsrcop
= (fpsrcop
< 0.0) ?
4167 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
4168 st0
-= (st1
* fpsrcop
* fptemp
);
4170 ST0
= double_to_floatx80(st0
);
4173 void helper_fyl2xp1(void)
4175 double fptemp
= floatx80_to_double(ST0
);
4177 if ((fptemp
+1.0)>0.0) {
4178 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
4179 fptemp
*= floatx80_to_double(ST1
);
4180 ST1
= double_to_floatx80(fptemp
);
4183 env
->fpus
&= (~0x4700);
4188 void helper_fsqrt(void)
4190 if (floatx80_is_neg(ST0
)) {
4191 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4194 ST0
= floatx80_sqrt(ST0
, &env
->fp_status
);
4197 void helper_fsincos(void)
4199 double fptemp
= floatx80_to_double(ST0
);
4201 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4204 ST0
= double_to_floatx80(sin(fptemp
));
4206 ST0
= double_to_floatx80(cos(fptemp
));
4207 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4208 /* the above code is for |arg| < 2**63 only */
4212 void helper_frndint(void)
4214 ST0
= floatx80_round_to_int(ST0
, &env
->fp_status
);
4217 void helper_fscale(void)
4219 if (floatx80_is_any_nan(ST1
)) {
4222 int n
= floatx80_to_int32_round_to_zero(ST1
, &env
->fp_status
);
4223 ST0
= floatx80_scalbn(ST0
, n
, &env
->fp_status
);
4227 void helper_fsin(void)
4229 double fptemp
= floatx80_to_double(ST0
);
4231 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4234 ST0
= double_to_floatx80(sin(fptemp
));
4235 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4236 /* the above code is for |arg| < 2**53 only */
4240 void helper_fcos(void)
4242 double fptemp
= floatx80_to_double(ST0
);
4244 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
4247 ST0
= double_to_floatx80(cos(fptemp
));
4248 env
->fpus
&= (~0x400); /* C2 <-- 0 */
4249 /* the above code is for |arg5 < 2**63 only */
4253 void helper_fxam_ST0(void)
4260 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4262 env
->fpus
|= 0x200; /* C1 <-- 1 */
4264 /* XXX: test fptags too */
4265 expdif
= EXPD(temp
);
4266 if (expdif
== MAXEXPD
) {
4267 if (MANTD(temp
) == 0x8000000000000000ULL
)
4268 env
->fpus
|= 0x500 /*Infinity*/;
4270 env
->fpus
|= 0x100 /*NaN*/;
4271 } else if (expdif
== 0) {
4272 if (MANTD(temp
) == 0)
4273 env
->fpus
|= 0x4000 /*Zero*/;
4275 env
->fpus
|= 0x4400 /*Denormal*/;
4281 void helper_fstenv(target_ulong ptr
, int data32
)
4283 int fpus
, fptag
, exp
, i
;
4287 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4289 for (i
=7; i
>=0; i
--) {
4291 if (env
->fptags
[i
]) {
4294 tmp
.d
= env
->fpregs
[i
].d
;
4297 if (exp
== 0 && mant
== 0) {
4300 } else if (exp
== 0 || exp
== MAXEXPD
4301 || (mant
& (1LL << 63)) == 0
4303 /* NaNs, infinity, denormal */
4310 stl(ptr
, env
->fpuc
);
4312 stl(ptr
+ 8, fptag
);
4313 stl(ptr
+ 12, 0); /* fpip */
4314 stl(ptr
+ 16, 0); /* fpcs */
4315 stl(ptr
+ 20, 0); /* fpoo */
4316 stl(ptr
+ 24, 0); /* fpos */
4319 stw(ptr
, env
->fpuc
);
4321 stw(ptr
+ 4, fptag
);
4329 void helper_fldenv(target_ulong ptr
, int data32
)
4334 env
->fpuc
= lduw(ptr
);
4335 fpus
= lduw(ptr
+ 4);
4336 fptag
= lduw(ptr
+ 8);
4339 env
->fpuc
= lduw(ptr
);
4340 fpus
= lduw(ptr
+ 2);
4341 fptag
= lduw(ptr
+ 4);
4343 env
->fpstt
= (fpus
>> 11) & 7;
4344 env
->fpus
= fpus
& ~0x3800;
4345 for(i
= 0;i
< 8; i
++) {
4346 env
->fptags
[i
] = ((fptag
& 3) == 3);
4351 void helper_fsave(target_ulong ptr
, int data32
)
4356 helper_fstenv(ptr
, data32
);
4358 ptr
+= (14 << data32
);
4359 for(i
= 0;i
< 8; i
++) {
4361 helper_fstt(tmp
, ptr
);
4379 void helper_frstor(target_ulong ptr
, int data32
)
4384 helper_fldenv(ptr
, data32
);
4385 ptr
+= (14 << data32
);
4387 for(i
= 0;i
< 8; i
++) {
4388 tmp
= helper_fldt(ptr
);
4394 void helper_fxsave(target_ulong ptr
, int data64
)
4396 int fpus
, fptag
, i
, nb_xmm_regs
;
4400 /* The operand must be 16 byte aligned */
4402 raise_exception(EXCP0D_GPF
);
4405 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
4407 for(i
= 0; i
< 8; i
++) {
4408 fptag
|= (env
->fptags
[i
] << i
);
4410 stw(ptr
, env
->fpuc
);
4412 stw(ptr
+ 4, fptag
^ 0xff);
4413 #ifdef TARGET_X86_64
4415 stq(ptr
+ 0x08, 0); /* rip */
4416 stq(ptr
+ 0x10, 0); /* rdp */
4420 stl(ptr
+ 0x08, 0); /* eip */
4421 stl(ptr
+ 0x0c, 0); /* sel */
4422 stl(ptr
+ 0x10, 0); /* dp */
4423 stl(ptr
+ 0x14, 0); /* sel */
4427 for(i
= 0;i
< 8; i
++) {
4429 helper_fstt(tmp
, addr
);
4433 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4434 /* XXX: finish it */
4435 stl(ptr
+ 0x18, env
->mxcsr
); /* mxcsr */
4436 stl(ptr
+ 0x1c, 0x0000ffff); /* mxcsr_mask */
4437 if (env
->hflags
& HF_CS64_MASK
)
4442 /* Fast FXSAVE leaves out the XMM registers */
4443 if (!(env
->efer
& MSR_EFER_FFXSR
)
4444 || (env
->hflags
& HF_CPL_MASK
)
4445 || !(env
->hflags
& HF_LMA_MASK
)) {
4446 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4447 stq(addr
, env
->xmm_regs
[i
].XMM_Q(0));
4448 stq(addr
+ 8, env
->xmm_regs
[i
].XMM_Q(1));
4455 void helper_fxrstor(target_ulong ptr
, int data64
)
4457 int i
, fpus
, fptag
, nb_xmm_regs
;
4461 /* The operand must be 16 byte aligned */
4463 raise_exception(EXCP0D_GPF
);
4466 env
->fpuc
= lduw(ptr
);
4467 fpus
= lduw(ptr
+ 2);
4468 fptag
= lduw(ptr
+ 4);
4469 env
->fpstt
= (fpus
>> 11) & 7;
4470 env
->fpus
= fpus
& ~0x3800;
4472 for(i
= 0;i
< 8; i
++) {
4473 env
->fptags
[i
] = ((fptag
>> i
) & 1);
4477 for(i
= 0;i
< 8; i
++) {
4478 tmp
= helper_fldt(addr
);
4483 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
4484 /* XXX: finish it */
4485 env
->mxcsr
= ldl(ptr
+ 0x18);
4487 if (env
->hflags
& HF_CS64_MASK
)
4492 /* Fast FXRESTORE leaves out the XMM registers */
4493 if (!(env
->efer
& MSR_EFER_FFXSR
)
4494 || (env
->hflags
& HF_CPL_MASK
)
4495 || !(env
->hflags
& HF_LMA_MASK
)) {
4496 for(i
= 0; i
< nb_xmm_regs
; i
++) {
4497 env
->xmm_regs
[i
].XMM_Q(0) = ldq(addr
);
4498 env
->xmm_regs
[i
].XMM_Q(1) = ldq(addr
+ 8);
4505 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, floatx80 f
)
4510 *pmant
= temp
.l
.lower
;
4511 *pexp
= temp
.l
.upper
;
4514 floatx80
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
4518 temp
.l
.upper
= upper
;
4519 temp
.l
.lower
= mant
;
4523 #ifdef TARGET_X86_64
4525 //#define DEBUG_MULDIV
4527 static void add128(uint64_t *plow
, uint64_t *phigh
, uint64_t a
, uint64_t b
)
4536 static void neg128(uint64_t *plow
, uint64_t *phigh
)
4540 add128(plow
, phigh
, 1, 0);
4543 /* return TRUE if overflow */
4544 static int div64(uint64_t *plow
, uint64_t *phigh
, uint64_t b
)
4546 uint64_t q
, r
, a1
, a0
;
4559 /* XXX: use a better algorithm */
4560 for(i
= 0; i
< 64; i
++) {
4562 a1
= (a1
<< 1) | (a0
>> 63);
4563 if (ab
|| a1
>= b
) {
4569 a0
= (a0
<< 1) | qb
;
4571 #if defined(DEBUG_MULDIV)
4572 printf("div: 0x%016" PRIx64
"%016" PRIx64
" / 0x%016" PRIx64
": q=0x%016" PRIx64
" r=0x%016" PRIx64
"\n",
4573 *phigh
, *plow
, b
, a0
, a1
);
4581 /* return TRUE if overflow */
4582 static int idiv64(uint64_t *plow
, uint64_t *phigh
, int64_t b
)
4585 sa
= ((int64_t)*phigh
< 0);
4587 neg128(plow
, phigh
);
4591 if (div64(plow
, phigh
, b
) != 0)
4594 if (*plow
> (1ULL << 63))
4598 if (*plow
>= (1ULL << 63))
4606 void helper_mulq_EAX_T0(target_ulong t0
)
4610 mulu64(&r0
, &r1
, EAX
, t0
);
4617 void helper_imulq_EAX_T0(target_ulong t0
)
4621 muls64(&r0
, &r1
, EAX
, t0
);
4625 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4628 target_ulong
helper_imulq_T0_T1(target_ulong t0
, target_ulong t1
)
4632 muls64(&r0
, &r1
, t0
, t1
);
4634 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
4638 void helper_divq_EAX(target_ulong t0
)
4642 raise_exception(EXCP00_DIVZ
);
4646 if (div64(&r0
, &r1
, t0
))
4647 raise_exception(EXCP00_DIVZ
);
4652 void helper_idivq_EAX(target_ulong t0
)
4656 raise_exception(EXCP00_DIVZ
);
4660 if (idiv64(&r0
, &r1
, t0
))
4661 raise_exception(EXCP00_DIVZ
);
4667 static void do_hlt(void)
4669 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
; /* needed if sti is just before */
4671 env
->exception_index
= EXCP_HLT
;
4675 void helper_hlt(int next_eip_addend
)
4677 helper_svm_check_intercept_param(SVM_EXIT_HLT
, 0);
4678 EIP
+= next_eip_addend
;
4683 void helper_monitor(target_ulong ptr
)
4685 if ((uint32_t)ECX
!= 0)
4686 raise_exception(EXCP0D_GPF
);
4687 /* XXX: store address ? */
4688 helper_svm_check_intercept_param(SVM_EXIT_MONITOR
, 0);
4691 void helper_mwait(int next_eip_addend
)
4693 if ((uint32_t)ECX
!= 0)
4694 raise_exception(EXCP0D_GPF
);
4695 helper_svm_check_intercept_param(SVM_EXIT_MWAIT
, 0);
4696 EIP
+= next_eip_addend
;
4698 /* XXX: not complete but not completely erroneous */
4699 if (env
->cpu_index
!= 0 || env
->next_cpu
!= NULL
) {
4700 /* more than one CPU: do not sleep because another CPU may
4707 void helper_debug(void)
4709 env
->exception_index
= EXCP_DEBUG
;
4713 void helper_reset_rf(void)
4715 env
->eflags
&= ~RF_MASK
;
4718 void helper_raise_interrupt(int intno
, int next_eip_addend
)
4720 raise_interrupt(intno
, 1, 0, next_eip_addend
);
4723 void helper_raise_exception(int exception_index
)
4725 raise_exception(exception_index
);
4728 void helper_cli(void)
4730 env
->eflags
&= ~IF_MASK
;
4733 void helper_sti(void)
4735 env
->eflags
|= IF_MASK
;
4739 /* vm86plus instructions */
4740 void helper_cli_vm(void)
4742 env
->eflags
&= ~VIF_MASK
;
4745 void helper_sti_vm(void)
4747 env
->eflags
|= VIF_MASK
;
4748 if (env
->eflags
& VIP_MASK
) {
4749 raise_exception(EXCP0D_GPF
);
4754 void helper_set_inhibit_irq(void)
4756 env
->hflags
|= HF_INHIBIT_IRQ_MASK
;
4759 void helper_reset_inhibit_irq(void)
4761 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
4764 void helper_boundw(target_ulong a0
, int v
)
4768 high
= ldsw(a0
+ 2);
4770 if (v
< low
|| v
> high
) {
4771 raise_exception(EXCP05_BOUND
);
4775 void helper_boundl(target_ulong a0
, int v
)
4780 if (v
< low
|| v
> high
) {
4781 raise_exception(EXCP05_BOUND
);
4785 #if !defined(CONFIG_USER_ONLY)
4787 #define MMUSUFFIX _mmu
4790 #include "softmmu_template.h"
4793 #include "softmmu_template.h"
4796 #include "softmmu_template.h"
4799 #include "softmmu_template.h"
4803 #if !defined(CONFIG_USER_ONLY)
4804 /* try to fill the TLB and return an exception if error. If retaddr is
4805 NULL, it means that the function was called in C code (i.e. not
4806 from generated code or from helper.c) */
4807 /* XXX: fix it to restore all registers */
4808 void tlb_fill(target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
4810 TranslationBlock
*tb
;
4813 CPUX86State
*saved_env
;
4815 /* XXX: hack to restore env in all cases, even if not called from
4818 env
= cpu_single_env
;
4820 ret
= cpu_x86_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
4823 /* now we have a real cpu fault */
4824 pc
= (unsigned long)retaddr
;
4825 tb
= tb_find_pc(pc
);
4827 /* the PC is inside the translated code. It means that we have
4828 a virtual CPU fault */
4829 cpu_restore_state(tb
, env
, pc
);
4832 raise_exception_err(env
->exception_index
, env
->error_code
);
4838 /* Secure Virtual Machine helpers */
4840 #if defined(CONFIG_USER_ONLY)
4842 void helper_vmrun(int aflag
, int next_eip_addend
)
4845 void helper_vmmcall(void)
4848 void helper_vmload(int aflag
)
4851 void helper_vmsave(int aflag
)
4854 void helper_stgi(void)
4857 void helper_clgi(void)
4860 void helper_skinit(void)
4863 void helper_invlpga(int aflag
)
4866 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
4869 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
4873 void helper_svm_check_io(uint32_t port
, uint32_t param
,
4874 uint32_t next_eip_addend
)
4879 static inline void svm_save_seg(target_phys_addr_t addr
,
4880 const SegmentCache
*sc
)
4882 stw_phys(addr
+ offsetof(struct vmcb_seg
, selector
),
4884 stq_phys(addr
+ offsetof(struct vmcb_seg
, base
),
4886 stl_phys(addr
+ offsetof(struct vmcb_seg
, limit
),
4888 stw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
),
4889 ((sc
->flags
>> 8) & 0xff) | ((sc
->flags
>> 12) & 0x0f00));
4892 static inline void svm_load_seg(target_phys_addr_t addr
, SegmentCache
*sc
)
4896 sc
->selector
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, selector
));
4897 sc
->base
= ldq_phys(addr
+ offsetof(struct vmcb_seg
, base
));
4898 sc
->limit
= ldl_phys(addr
+ offsetof(struct vmcb_seg
, limit
));
4899 flags
= lduw_phys(addr
+ offsetof(struct vmcb_seg
, attrib
));
4900 sc
->flags
= ((flags
& 0xff) << 8) | ((flags
& 0x0f00) << 12);
4903 static inline void svm_load_seg_cache(target_phys_addr_t addr
,
4904 CPUState
*env
, int seg_reg
)
4906 SegmentCache sc1
, *sc
= &sc1
;
4907 svm_load_seg(addr
, sc
);
4908 cpu_x86_load_seg_cache(env
, seg_reg
, sc
->selector
,
4909 sc
->base
, sc
->limit
, sc
->flags
);
4912 void helper_vmrun(int aflag
, int next_eip_addend
)
4918 helper_svm_check_intercept_param(SVM_EXIT_VMRUN
, 0);
4923 addr
= (uint32_t)EAX
;
4925 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmrun! " TARGET_FMT_lx
"\n", addr
);
4927 env
->vm_vmcb
= addr
;
4929 /* save the current CPU state in the hsave page */
4930 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
4931 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4933 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4934 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4936 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4937 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4938 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4939 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4940 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4941 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4943 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4944 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4946 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
4948 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
4950 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
4952 svm_save_seg(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
4955 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
),
4956 EIP
+ next_eip_addend
);
4957 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4958 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4960 /* load the interception bitmaps so we do not need to access the
4962 env
->intercept
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
));
4963 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
4964 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
4965 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_read
));
4966 env
->intercept_dr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_write
));
4967 env
->intercept_exceptions
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_exceptions
));
4969 /* enable intercepts */
4970 env
->hflags
|= HF_SVMI_MASK
;
4972 env
->tsc_offset
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tsc_offset
));
4974 env
->gdt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
));
4975 env
->gdt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
4977 env
->idt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
));
4978 env
->idt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
));
4980 /* clear exit_info_2 so we behave like the real hardware */
4981 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
4983 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
)));
4984 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
)));
4985 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
)));
4986 env
->cr
[2] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
4987 int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
4988 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
4989 if (int_ctl
& V_INTR_MASKING_MASK
) {
4990 env
->v_tpr
= int_ctl
& V_TPR_MASK
;
4991 env
->hflags2
|= HF2_VINTR_MASK
;
4992 if (env
->eflags
& IF_MASK
)
4993 env
->hflags2
|= HF2_HIF_MASK
;
4997 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
)));
4999 load_eflags(ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
)),
5000 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
5001 CC_OP
= CC_OP_EFLAGS
;
5003 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
5005 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
5007 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
5009 svm_load_seg_cache(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
5012 EIP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
5014 ESP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
5015 EAX
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
5016 env
->dr
[7] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
5017 env
->dr
[6] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
5018 cpu_x86_set_cpl(env
, ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
)));
5020 /* FIXME: guest state consistency checks */
5022 switch(ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
5023 case TLB_CONTROL_DO_NOTHING
:
5025 case TLB_CONTROL_FLUSH_ALL_ASID
:
5026 /* FIXME: this is not 100% correct but should work for now */
5031 env
->hflags2
|= HF2_GIF_MASK
;
5033 if (int_ctl
& V_IRQ_MASK
) {
5034 env
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
5037 /* maybe we need to inject an event */
5038 event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
5039 if (event_inj
& SVM_EVTINJ_VALID
) {
5040 uint8_t vector
= event_inj
& SVM_EVTINJ_VEC_MASK
;
5041 uint16_t valid_err
= event_inj
& SVM_EVTINJ_VALID_ERR
;
5042 uint32_t event_inj_err
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
));
5044 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "Injecting(%#hx): ", valid_err
);
5045 /* FIXME: need to implement valid_err */
5046 switch (event_inj
& SVM_EVTINJ_TYPE_MASK
) {
5047 case SVM_EVTINJ_TYPE_INTR
:
5048 env
->exception_index
= vector
;
5049 env
->error_code
= event_inj_err
;
5050 env
->exception_is_int
= 0;
5051 env
->exception_next_eip
= -1;
5052 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "INTR");
5053 /* XXX: is it always correct ? */
5054 do_interrupt(vector
, 0, 0, 0, 1);
5056 case SVM_EVTINJ_TYPE_NMI
:
5057 env
->exception_index
= EXCP02_NMI
;
5058 env
->error_code
= event_inj_err
;
5059 env
->exception_is_int
= 0;
5060 env
->exception_next_eip
= EIP
;
5061 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "NMI");
5064 case SVM_EVTINJ_TYPE_EXEPT
:
5065 env
->exception_index
= vector
;
5066 env
->error_code
= event_inj_err
;
5067 env
->exception_is_int
= 0;
5068 env
->exception_next_eip
= -1;
5069 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "EXEPT");
5072 case SVM_EVTINJ_TYPE_SOFT
:
5073 env
->exception_index
= vector
;
5074 env
->error_code
= event_inj_err
;
5075 env
->exception_is_int
= 1;
5076 env
->exception_next_eip
= EIP
;
5077 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "SOFT");
5081 qemu_log_mask(CPU_LOG_TB_IN_ASM
, " %#x %#x\n", env
->exception_index
, env
->error_code
);
5085 void helper_vmmcall(void)
5087 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL
, 0);
5088 raise_exception(EXCP06_ILLOP
);
5091 void helper_vmload(int aflag
)
5094 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD
, 0);
5099 addr
= (uint32_t)EAX
;
5101 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmload! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
5102 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
5103 env
->segs
[R_FS
].base
);
5105 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.fs
),
5107 svm_load_seg_cache(addr
+ offsetof(struct vmcb
, save
.gs
),
5109 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
5111 svm_load_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
5114 #ifdef TARGET_X86_64
5115 env
->kernelgsbase
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
));
5116 env
->lstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
));
5117 env
->cstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
));
5118 env
->fmask
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
));
5120 env
->star
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.star
));
5121 env
->sysenter_cs
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
));
5122 env
->sysenter_esp
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
));
5123 env
->sysenter_eip
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
));
5126 void helper_vmsave(int aflag
)
5129 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE
, 0);
5134 addr
= (uint32_t)EAX
;
5136 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmsave! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
5137 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
5138 env
->segs
[R_FS
].base
);
5140 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.fs
),
5142 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.gs
),
5144 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.tr
),
5146 svm_save_seg(addr
+ offsetof(struct vmcb
, save
.ldtr
),
5149 #ifdef TARGET_X86_64
5150 stq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
), env
->kernelgsbase
);
5151 stq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
), env
->lstar
);
5152 stq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
), env
->cstar
);
5153 stq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
), env
->fmask
);
5155 stq_phys(addr
+ offsetof(struct vmcb
, save
.star
), env
->star
);
5156 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
), env
->sysenter_cs
);
5157 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
), env
->sysenter_esp
);
5158 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
), env
->sysenter_eip
);
5161 void helper_stgi(void)
5163 helper_svm_check_intercept_param(SVM_EXIT_STGI
, 0);
5164 env
->hflags2
|= HF2_GIF_MASK
;
5167 void helper_clgi(void)
5169 helper_svm_check_intercept_param(SVM_EXIT_CLGI
, 0);
5170 env
->hflags2
&= ~HF2_GIF_MASK
;
5173 void helper_skinit(void)
5175 helper_svm_check_intercept_param(SVM_EXIT_SKINIT
, 0);
5176 /* XXX: not implemented */
5177 raise_exception(EXCP06_ILLOP
);
5180 void helper_invlpga(int aflag
)
5183 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA
, 0);
5188 addr
= (uint32_t)EAX
;
5190 /* XXX: could use the ASID to see if it is needed to do the
5192 tlb_flush_page(env
, addr
);
5195 void helper_svm_check_intercept_param(uint32_t type
, uint64_t param
)
5197 if (likely(!(env
->hflags
& HF_SVMI_MASK
)))
5200 case SVM_EXIT_READ_CR0
... SVM_EXIT_READ_CR0
+ 8:
5201 if (env
->intercept_cr_read
& (1 << (type
- SVM_EXIT_READ_CR0
))) {
5202 helper_vmexit(type
, param
);
5205 case SVM_EXIT_WRITE_CR0
... SVM_EXIT_WRITE_CR0
+ 8:
5206 if (env
->intercept_cr_write
& (1 << (type
- SVM_EXIT_WRITE_CR0
))) {
5207 helper_vmexit(type
, param
);
5210 case SVM_EXIT_READ_DR0
... SVM_EXIT_READ_DR0
+ 7:
5211 if (env
->intercept_dr_read
& (1 << (type
- SVM_EXIT_READ_DR0
))) {
5212 helper_vmexit(type
, param
);
5215 case SVM_EXIT_WRITE_DR0
... SVM_EXIT_WRITE_DR0
+ 7:
5216 if (env
->intercept_dr_write
& (1 << (type
- SVM_EXIT_WRITE_DR0
))) {
5217 helper_vmexit(type
, param
);
5220 case SVM_EXIT_EXCP_BASE
... SVM_EXIT_EXCP_BASE
+ 31:
5221 if (env
->intercept_exceptions
& (1 << (type
- SVM_EXIT_EXCP_BASE
))) {
5222 helper_vmexit(type
, param
);
5226 if (env
->intercept
& (1ULL << (SVM_EXIT_MSR
- SVM_EXIT_INTR
))) {
5227 /* FIXME: this should be read in at vmrun (faster this way?) */
5228 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.msrpm_base_pa
));
5230 switch((uint32_t)ECX
) {
5235 case 0xc0000000 ... 0xc0001fff:
5236 t0
= (8192 + ECX
- 0xc0000000) * 2;
5240 case 0xc0010000 ... 0xc0011fff:
5241 t0
= (16384 + ECX
- 0xc0010000) * 2;
5246 helper_vmexit(type
, param
);
5251 if (ldub_phys(addr
+ t1
) & ((1 << param
) << t0
))
5252 helper_vmexit(type
, param
);
5256 if (env
->intercept
& (1ULL << (type
- SVM_EXIT_INTR
))) {
5257 helper_vmexit(type
, param
);
5263 void helper_svm_check_io(uint32_t port
, uint32_t param
,
5264 uint32_t next_eip_addend
)
5266 if (env
->intercept
& (1ULL << (SVM_EXIT_IOIO
- SVM_EXIT_INTR
))) {
5267 /* FIXME: this should be read in at vmrun (faster this way?) */
5268 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.iopm_base_pa
));
5269 uint16_t mask
= (1 << ((param
>> 4) & 7)) - 1;
5270 if(lduw_phys(addr
+ port
/ 8) & (mask
<< (port
& 7))) {
5272 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
),
5273 env
->eip
+ next_eip_addend
);
5274 helper_vmexit(SVM_EXIT_IOIO
, param
| (port
<< 16));
5279 /* Note: currently only 32 bits of exit_code are used */
5280 void helper_vmexit(uint32_t exit_code
, uint64_t exit_info_1
)
5284 qemu_log_mask(CPU_LOG_TB_IN_ASM
, "vmexit(%08x, %016" PRIx64
", %016" PRIx64
", " TARGET_FMT_lx
")!\n",
5285 exit_code
, exit_info_1
,
5286 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
)),
5289 if(env
->hflags
& HF_INHIBIT_IRQ_MASK
) {
5290 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), SVM_INTERRUPT_SHADOW_MASK
);
5291 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
5293 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), 0);
5296 /* Save the VM state in the vmcb */
5297 svm_save_seg(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.es
),
5299 svm_save_seg(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cs
),
5301 svm_save_seg(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ss
),
5303 svm_save_seg(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.ds
),
5306 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
5307 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
5309 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
5310 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
5312 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
5313 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
5314 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
5315 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
5316 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
5318 int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
5319 int_ctl
&= ~(V_TPR_MASK
| V_IRQ_MASK
);
5320 int_ctl
|= env
->v_tpr
& V_TPR_MASK
;
5321 if (env
->interrupt_request
& CPU_INTERRUPT_VIRQ
)
5322 int_ctl
|= V_IRQ_MASK
;
5323 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
), int_ctl
);
5325 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
5326 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
), env
->eip
);
5327 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
5328 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
), EAX
);
5329 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
5330 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
5331 stb_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
), env
->hflags
& HF_CPL_MASK
);
5333 /* Reload the host state from vm_hsave */
5334 env
->hflags2
&= ~(HF2_HIF_MASK
| HF2_VINTR_MASK
);
5335 env
->hflags
&= ~HF_SVMI_MASK
;
5337 env
->intercept_exceptions
= 0;
5338 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
5339 env
->tsc_offset
= 0;
5341 env
->gdt
.base
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
));
5342 env
->gdt
.limit
= ldl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
5344 env
->idt
.base
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
));
5345 env
->idt
.limit
= ldl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
));
5347 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
)) | CR0_PE_MASK
);
5348 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
)));
5349 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
)));
5350 /* we need to set the efer after the crs so the hidden flags get
5353 ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
)));
5355 load_eflags(ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
)),
5356 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
5357 CC_OP
= CC_OP_EFLAGS
;
5359 svm_load_seg_cache(env
->vm_hsave
+ offsetof(struct vmcb
, save
.es
),
5361 svm_load_seg_cache(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cs
),
5363 svm_load_seg_cache(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ss
),
5365 svm_load_seg_cache(env
->vm_hsave
+ offsetof(struct vmcb
, save
.ds
),
5368 EIP
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
));
5369 ESP
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
));
5370 EAX
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
));
5372 env
->dr
[6] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
));
5373 env
->dr
[7] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
));
5376 cpu_x86_set_cpl(env
, 0);
5377 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_code
), exit_code
);
5378 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_1
), exit_info_1
);
5380 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_int_info
),
5381 ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
)));
5382 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_int_info_err
),
5383 ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
)));
5384 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), 0);
5386 env
->hflags2
&= ~HF2_GIF_MASK
;
5387 /* FIXME: Resets the current ASID register to zero (host ASID). */
5389 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5391 /* Clears the TSC_OFFSET inside the processor. */
5393 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5394 from the page table indicated the host's CR3. If the PDPEs contain
5395 illegal state, the processor causes a shutdown. */
5397 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5398 env
->cr
[0] |= CR0_PE_MASK
;
5399 env
->eflags
&= ~VM_MASK
;
5401 /* Disables all breakpoints in the host DR7 register. */
5403 /* Checks the reloaded host state for consistency. */
5405 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5406 host's code segment or non-canonical (in the case of long mode), a
5407 #GP fault is delivered inside the host.) */
5409 /* remove any pending exception */
5410 env
->exception_index
= -1;
5411 env
->error_code
= 0;
5412 env
->old_exception
= -1;
5420 /* XXX: optimize by storing fptt and fptags in the static cpu state */
5421 void helper_enter_mmx(void)
5424 *(uint32_t *)(env
->fptags
) = 0;
5425 *(uint32_t *)(env
->fptags
+ 4) = 0;
5428 void helper_emms(void)
5430 /* set to empty state */
5431 *(uint32_t *)(env
->fptags
) = 0x01010101;
5432 *(uint32_t *)(env
->fptags
+ 4) = 0x01010101;
5436 void helper_movq(void *d
, void *s
)
5438 *(uint64_t *)d
= *(uint64_t *)s
;
5442 #include "ops_sse.h"
5445 #include "ops_sse.h"
5448 #include "helper_template.h"
5452 #include "helper_template.h"
5456 #include "helper_template.h"
5459 #ifdef TARGET_X86_64
5462 #include "helper_template.h"
5467 /* bit operations */
5468 target_ulong
helper_bsf(target_ulong t0
)
5475 while ((res
& 1) == 0) {
5482 target_ulong
helper_lzcnt(target_ulong t0
, int wordsize
)
5485 target_ulong res
, mask
;
5487 if (wordsize
> 0 && t0
== 0) {
5491 count
= TARGET_LONG_BITS
- 1;
5492 mask
= (target_ulong
)1 << (TARGET_LONG_BITS
- 1);
5493 while ((res
& mask
) == 0) {
5498 return wordsize
- 1 - count
;
5503 target_ulong
helper_bsr(target_ulong t0
)
5505 return helper_lzcnt(t0
, 0);
5508 static int compute_all_eflags(void)
5513 static int compute_c_eflags(void)
5515 return CC_SRC
& CC_C
;
5518 uint32_t helper_cc_compute_all(int op
)
5521 default: /* should never happen */ return 0;
5523 case CC_OP_EFLAGS
: return compute_all_eflags();
5525 case CC_OP_MULB
: return compute_all_mulb();
5526 case CC_OP_MULW
: return compute_all_mulw();
5527 case CC_OP_MULL
: return compute_all_mull();
5529 case CC_OP_ADDB
: return compute_all_addb();
5530 case CC_OP_ADDW
: return compute_all_addw();
5531 case CC_OP_ADDL
: return compute_all_addl();
5533 case CC_OP_ADCB
: return compute_all_adcb();
5534 case CC_OP_ADCW
: return compute_all_adcw();
5535 case CC_OP_ADCL
: return compute_all_adcl();
5537 case CC_OP_SUBB
: return compute_all_subb();
5538 case CC_OP_SUBW
: return compute_all_subw();
5539 case CC_OP_SUBL
: return compute_all_subl();
5541 case CC_OP_SBBB
: return compute_all_sbbb();
5542 case CC_OP_SBBW
: return compute_all_sbbw();
5543 case CC_OP_SBBL
: return compute_all_sbbl();
5545 case CC_OP_LOGICB
: return compute_all_logicb();
5546 case CC_OP_LOGICW
: return compute_all_logicw();
5547 case CC_OP_LOGICL
: return compute_all_logicl();
5549 case CC_OP_INCB
: return compute_all_incb();
5550 case CC_OP_INCW
: return compute_all_incw();
5551 case CC_OP_INCL
: return compute_all_incl();
5553 case CC_OP_DECB
: return compute_all_decb();
5554 case CC_OP_DECW
: return compute_all_decw();
5555 case CC_OP_DECL
: return compute_all_decl();
5557 case CC_OP_SHLB
: return compute_all_shlb();
5558 case CC_OP_SHLW
: return compute_all_shlw();
5559 case CC_OP_SHLL
: return compute_all_shll();
5561 case CC_OP_SARB
: return compute_all_sarb();
5562 case CC_OP_SARW
: return compute_all_sarw();
5563 case CC_OP_SARL
: return compute_all_sarl();
5565 #ifdef TARGET_X86_64
5566 case CC_OP_MULQ
: return compute_all_mulq();
5568 case CC_OP_ADDQ
: return compute_all_addq();
5570 case CC_OP_ADCQ
: return compute_all_adcq();
5572 case CC_OP_SUBQ
: return compute_all_subq();
5574 case CC_OP_SBBQ
: return compute_all_sbbq();
5576 case CC_OP_LOGICQ
: return compute_all_logicq();
5578 case CC_OP_INCQ
: return compute_all_incq();
5580 case CC_OP_DECQ
: return compute_all_decq();
5582 case CC_OP_SHLQ
: return compute_all_shlq();
5584 case CC_OP_SARQ
: return compute_all_sarq();
5589 uint32_t helper_cc_compute_c(int op
)
5592 default: /* should never happen */ return 0;
5594 case CC_OP_EFLAGS
: return compute_c_eflags();
5596 case CC_OP_MULB
: return compute_c_mull();
5597 case CC_OP_MULW
: return compute_c_mull();
5598 case CC_OP_MULL
: return compute_c_mull();
5600 case CC_OP_ADDB
: return compute_c_addb();
5601 case CC_OP_ADDW
: return compute_c_addw();
5602 case CC_OP_ADDL
: return compute_c_addl();
5604 case CC_OP_ADCB
: return compute_c_adcb();
5605 case CC_OP_ADCW
: return compute_c_adcw();
5606 case CC_OP_ADCL
: return compute_c_adcl();
5608 case CC_OP_SUBB
: return compute_c_subb();
5609 case CC_OP_SUBW
: return compute_c_subw();
5610 case CC_OP_SUBL
: return compute_c_subl();
5612 case CC_OP_SBBB
: return compute_c_sbbb();
5613 case CC_OP_SBBW
: return compute_c_sbbw();
5614 case CC_OP_SBBL
: return compute_c_sbbl();
5616 case CC_OP_LOGICB
: return compute_c_logicb();
5617 case CC_OP_LOGICW
: return compute_c_logicw();
5618 case CC_OP_LOGICL
: return compute_c_logicl();
5620 case CC_OP_INCB
: return compute_c_incl();
5621 case CC_OP_INCW
: return compute_c_incl();
5622 case CC_OP_INCL
: return compute_c_incl();
5624 case CC_OP_DECB
: return compute_c_incl();
5625 case CC_OP_DECW
: return compute_c_incl();
5626 case CC_OP_DECL
: return compute_c_incl();
5628 case CC_OP_SHLB
: return compute_c_shlb();
5629 case CC_OP_SHLW
: return compute_c_shlw();
5630 case CC_OP_SHLL
: return compute_c_shll();
5632 case CC_OP_SARB
: return compute_c_sarl();
5633 case CC_OP_SARW
: return compute_c_sarl();
5634 case CC_OP_SARL
: return compute_c_sarl();
5636 #ifdef TARGET_X86_64
5637 case CC_OP_MULQ
: return compute_c_mull();
5639 case CC_OP_ADDQ
: return compute_c_addq();
5641 case CC_OP_ADCQ
: return compute_c_adcq();
5643 case CC_OP_SUBQ
: return compute_c_subq();
5645 case CC_OP_SBBQ
: return compute_c_sbbq();
5647 case CC_OP_LOGICQ
: return compute_c_logicq();
5649 case CC_OP_INCQ
: return compute_c_incl();
5651 case CC_OP_DECQ
: return compute_c_incl();
5653 case CC_OP_SHLQ
: return compute_c_shlq();
5655 case CC_OP_SARQ
: return compute_c_sarl();