]> git.proxmox.com Git - qemu.git/blame - target-i386/op_helper.c
Merge remote-tracking branch 'riku/linux-user-for-upstream' into staging
[qemu.git] / target-i386 / op_helper.c
CommitLineData
eaa728ee
FB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
eaa728ee 18 */
83dae095 19
a2c9ed3c 20#include <math.h>
3e457172
BS
21#include "cpu.h"
22#include "dyngen-exec.h"
eaa728ee 23#include "host-utils.h"
35bed8ee 24#include "ioport.h"
3e457172
BS
25#include "qemu-common.h"
26#include "qemu-log.h"
27#include "cpu-defs.h"
28#include "helper.h"
eaa728ee 29
3e457172
BS
30#if !defined(CONFIG_USER_ONLY)
31#include "softmmu_exec.h"
32#endif /* !defined(CONFIG_USER_ONLY) */
eaa728ee 33
3e457172 34//#define DEBUG_PCALL
d12d51d5
AL
35
36#ifdef DEBUG_PCALL
93fcfe39
AL
37# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
38# define LOG_PCALL_STATE(env) \
39 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
d12d51d5
AL
40#else
41# define LOG_PCALL(...) do { } while (0)
42# define LOG_PCALL_STATE(env) do { } while (0)
43#endif
44
3e457172
BS
45/* n must be a constant to be efficient */
46static inline target_long lshift(target_long x, int n)
47{
48 if (n >= 0) {
49 return x << n;
50 } else {
51 return x >> (-n);
52 }
53}
54
55#define RC_MASK 0xc00
56#define RC_NEAR 0x000
57#define RC_DOWN 0x400
58#define RC_UP 0x800
59#define RC_CHOP 0xc00
60
61#define MAXTAN 9223372036854775808.0
62
63/* the following deal with x86 long double-precision numbers */
64#define MAXEXPD 0x7fff
65#define EXPBIAS 16383
66#define EXPD(fp) (fp.l.upper & 0x7fff)
67#define SIGND(fp) ((fp.l.upper) & 0x8000)
68#define MANTD(fp) (fp.l.lower)
69#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS
70
71static inline void fpush(void)
72{
73 env->fpstt = (env->fpstt - 1) & 7;
74 env->fptags[env->fpstt] = 0; /* validate stack entry */
75}
76
/* Pop one slot from the x87 register stack: mark the current top as
   empty, then post-increment the top-of-stack index modulo 8. */
static inline void fpop(void)
{
    env->fptags[env->fpstt] = 1; /* invalidate stack entry */
    env->fpstt = (env->fpstt + 1) & 7;
}
82
83static inline floatx80 helper_fldt(target_ulong ptr)
84{
85 CPU_LDoubleU temp;
86
87 temp.l.lower = ldq(ptr);
88 temp.l.upper = lduw(ptr + 8);
89 return temp.d;
90}
91
92static inline void helper_fstt(floatx80 f, target_ulong ptr)
93{
94 CPU_LDoubleU temp;
95
96 temp.d = f;
97 stq(ptr, temp.l.lower);
98 stw(ptr + 8, temp.l.upper);
99}
100
101#define FPUS_IE (1 << 0)
102#define FPUS_DE (1 << 1)
103#define FPUS_ZE (1 << 2)
104#define FPUS_OE (1 << 3)
105#define FPUS_UE (1 << 4)
106#define FPUS_PE (1 << 5)
107#define FPUS_SF (1 << 6)
108#define FPUS_SE (1 << 7)
109#define FPUS_B (1 << 15)
110
111#define FPUC_EM 0x3f
112
/* Assemble the full architectural EFLAGS image: merge the lazily
   evaluated arithmetic flags (from CC_OP) and the direction flag
   (kept separately in DF) into env->eflags. */
static inline uint32_t compute_eflags(void)
{
    return env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
}
117
118/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */
119static inline void load_eflags(int eflags, int update_mask)
120{
121 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
122 DF = 1 - (2 * ((eflags >> 10) & 1));
123 env->eflags = (env->eflags & ~update_mask) |
124 (eflags & update_mask) | 0x2;
125}
126
127/* load efer and update the corresponding hflags. XXX: do consistency
128 checks with cpuid bits ? */
129static inline void cpu_load_efer(CPUState *env, uint64_t val)
130{
131 env->efer = val;
132 env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
133 if (env->efer & MSR_EFER_LMA) {
134 env->hflags |= HF_LMA_MASK;
135 }
136 if (env->efer & MSR_EFER_SVME) {
137 env->hflags |= HF_SVME_MASK;
138 }
139}
d12d51d5 140
eaa728ee
FB
141#if 0
142#define raise_exception_err(a, b)\
143do {\
93fcfe39 144 qemu_log("raise_exception line=%d\n", __LINE__);\
eaa728ee
FB
145 (raise_exception_err)(a, b);\
146} while (0)
147#endif
148
3e457172
BS
149static void QEMU_NORETURN raise_exception_err(int exception_index,
150 int error_code);
151
/* PF lookup table: parity_table[b] is CC_P iff byte b contains an
   even number of set bits (x86 parity is computed on the low byte
   of a result only). */
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
186
/* modulo 17 table: effective rotate count for 16-bit RCL/RCR, which
   rotate through CF so the period is 16 + 1 = 17 bits. */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
194
/* modulo 9 table: effective rotate count for 8-bit RCL/RCR, which
   rotate through CF so the period is 8 + 1 = 9 bits. */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
202
c31da136
AJ
203#define floatx80_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL )
204#define floatx80_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL )
205#define floatx80_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL )
206
eaa728ee
FB
207/* broken thread support */
208
c227f099 209static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
eaa728ee
FB
210
/* Take the global lock used to emulate the LOCK prefix: serialises
   locked instructions across emulated CPUs. */
void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}
215
/* Release the global LOCK-prefix lock taken by helper_lock(). */
void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
220
/* Store t0 into EFLAGS, modifying only the bits selected by
   update_mask (thin wrapper over load_eflags for generated code). */
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}
225
226target_ulong helper_read_eflags(void)
227{
228 uint32_t eflags;
a7812ae4 229 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
230 eflags |= (DF & DF_MASK);
231 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
232 return eflags;
233}
234
235/* return non zero if error */
236static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
237 int selector)
238{
239 SegmentCache *dt;
240 int index;
241 target_ulong ptr;
242
243 if (selector & 0x4)
244 dt = &env->ldt;
245 else
246 dt = &env->gdt;
247 index = selector & ~7;
248 if ((index + 7) > dt->limit)
249 return -1;
250 ptr = dt->base + index;
251 *e1_ptr = ldl_kernel(ptr);
252 *e2_ptr = ldl_kernel(ptr + 4);
253 return 0;
254}
255
256static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
257{
258 unsigned int limit;
259 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
260 if (e2 & DESC_G_MASK)
261 limit = (limit << 12) | 0xfff;
262 return limit;
263}
264
265static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
266{
267 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
268}
269
/* Fill a segment cache directly from the two raw descriptor words,
   with no permission or validity checks (caller has already done
   them, or explicitly wants a raw load). */
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
276
277/* init the segment cache in vm86 mode. */
278static inline void load_seg_vm(int seg, int selector)
279{
280 selector &= 0xffff;
281 cpu_x86_load_seg_cache(env, seg, selector,
282 (selector << 4), 0xffff, 0);
283}
284
/* Fetch the inner-privilege stack pointer (SS:ESP for privilege level
   dpl) from the current task state segment, raising #TS if the TSS is
   too small.  Aborts emulation if TR does not hold a valid TSS. */
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    /* shift is 0 for a 16-bit TSS, 1 for a 32-bit TSS */
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        /* 16-bit TSS: SP then SS, two bytes each */
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        /* 32-bit TSS: 4-byte ESP then 2-byte SS */
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
319
/* XXX: merge with load_seg() */
/* Load a segment register as part of a task switch, performing the
   architectural permission checks; any failure raises #TS (or #NP for
   a not-present segment) with the faulting selector as error code.
   A null selector is allowed except for CS and SS. */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        /* must be a code or data segment, not a system descriptor */
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* null selector: forbidden for CS and SS only */
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
369
370#define SWITCH_TSS_JMP 0
371#define SWITCH_TSS_IRET 1
372#define SWITCH_TSS_CALL 2
373
/* XXX: restore CPU state in registers (PowerPC case) */
/* Perform a hardware task switch to the TSS named by tss_selector
   (with descriptor words e1/e2).  source is SWITCH_TSS_JMP/IRET/CALL
   and controls busy-bit and NT-flag handling; next_eip is the EIP
   saved into the outgoing task's TSS.  The statement order here is
   architectural: all checks and dummy accesses happen before any
   state is committed, so a fault leaves the old task intact. */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    /* minimum TSS size: 104 bytes for a 32-bit TSS, 44 for 16-bit */
    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        /* back-link to the outgoing task; NT lets IRET return to it */
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
622
/* check if Port I/O is allowed in TSS */
/* Consult the I/O permission bitmap of the current (32-bit) TSS for
   'size' consecutive ports starting at 'addr'; raises #GP(0) if any
   covered bit is set or the bitmap/TSS is invalid. */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    /* bitmap offset is stored at byte 0x66 of the TSS */
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
647
/* Check I/O permission for a 1-byte access at port t0 (raises #GP). */
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}
652
/* Check I/O permission for a 2-byte access at port t0 (raises #GP). */
void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}
657
/* Check I/O permission for a 4-byte access at port t0 (raises #GP). */
void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
662
/* Guest OUT to an 8-bit I/O port; only the low byte of data is sent. */
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}
667
/* Guest IN from an 8-bit I/O port. */
target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}
672
/* Guest OUT to a 16-bit I/O port; only the low word of data is sent. */
void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}
677
/* Guest IN from a 16-bit I/O port. */
target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}
682
/* Guest OUT to a 32-bit I/O port. */
void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}
687
/* Guest IN from a 32-bit I/O port. */
target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}
692
693static inline unsigned int get_sp_mask(unsigned int e2)
694{
695 if (e2 & DESC_B_MASK)
696 return 0xffffffff;
697 else
698 return 0xffff;
699}
700
2ed51f5b
AL
/* Whether CPU exception vector intno pushes an error code on the
   stack: #DF(8), #TS(10), #NP(11), #SS(12), #GP(13), #PF(14), #AC(17).
   NOTE(review): the misspelling "exeption" is kept because both
   interrupt dispatchers call it by this name. */
static int exeption_has_error_code(int intno)
{
    if (intno == 8 || intno == 17) {
        return 1;
    }
    return (intno >= 10 && intno <= 14) ? 1 : 0;
}
715
eaa728ee
FB
716#ifdef TARGET_X86_64
717#define SET_ESP(val, sp_mask)\
718do {\
719 if ((sp_mask) == 0xffff)\
720 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
721 else if ((sp_mask) == 0xffffffffLL)\
722 ESP = (uint32_t)(val);\
723 else\
724 ESP = (val);\
725} while (0)
726#else
727#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
728#endif
729
c0a04f0e
AL
730/* in 64-bit machines, this can overflow. So this segment addition macro
731 * can be used to trim the value to 32-bit whenever needed */
732#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
733
eaa728ee
FB
734/* XXX: add a is_user flag to have proper security support */
735#define PUSHW(ssp, sp, sp_mask, val)\
736{\
737 sp -= 2;\
738 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
739}
740
741#define PUSHL(ssp, sp, sp_mask, val)\
742{\
743 sp -= 4;\
c0a04f0e 744 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
eaa728ee
FB
745}
746
747#define POPW(ssp, sp, sp_mask, val)\
748{\
749 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
750 sp += 2;\
751}
752
753#define POPL(ssp, sp, sp_mask, val)\
754{\
c0a04f0e 755 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
eaa728ee
FB
756 sp += 4;\
757}
758
/* protected mode interrupt */
/* Deliver interrupt/exception 'intno' in protected mode.  is_int is
   set for software INT n (next_eip is then the return address),
   is_hw for hardware interrupts; error_code is pushed when the vector
   architecturally carries one.  Handles task gates, inner-privilege
   stack switches, vm86 re-entry and 16/32-bit gate sizes.  All checks
   raise faults before any state is committed. */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    /* IDT entries are 8 bytes in protected mode */
    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code on the new task's stack */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    /* validate the target code segment */
    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege: fetch the new SS:ESP from the TSS */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    /* shift: 0 for a 16-bit gate, 1 for a 32-bit gate */
    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                /* leaving vm86: data segments are pushed too */
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            /* null the vm86 data segments in the handler's context */
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
952
953#ifdef TARGET_X86_64
954
955#define PUSHQ(sp, val)\
956{\
957 sp -= 8;\
958 stq_kernel(sp, (val));\
959}
960
961#define POPQ(sp, val)\
962{\
963 val = ldq_kernel(sp);\
964 sp += 8;\
965}
966
967static inline target_ulong get_rsp_from_tss(int level)
968{
969 int index;
970
971#if 0
972 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
973 env->tr.base, env->tr.limit);
974#endif
975
976 if (!(env->tr.flags & DESC_P_MASK))
977 cpu_abort(env, "invalid tss");
978 index = 8 * level + 4;
979 if ((index + 7) > env->tr.limit)
980 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
981 return ldq_kernel(env->tr.base + index);
982}
983
/* 64 bit interrupt */
/* Deliver interrupt/exception 'intno' in long mode.  Only 64-bit
   interrupt/trap gates are valid; the 16-byte IDT entry supplies a
   64-bit target offset and an optional IST index selecting a fixed
   stack from the TSS.  SS:RSP, RFLAGS, CS:RIP (and the error code when
   applicable) are always pushed on the new 16-byte-aligned stack. */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    /* IDT entries are 16 bytes in long mode */
    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    /* validate the target code segment: must be 64-bit code */
    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege (or forced stack via IST) */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        /* long mode allows a null SS at inner levels */
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
1100#endif
1101
d9957a8b 1102#ifdef TARGET_X86_64
eaa728ee
FB
1103#if defined(CONFIG_USER_ONLY)
/* SYSCALL in user-mode emulation: there is no kernel to enter, so just
   record the event and exit the cpu loop; the outer loop translates it
   into a host system call. next_eip_addend points past the SYSCALL
   instruction so execution can resume there. Does not return. */
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
1110#else
/* SYSCALL (system emulation): fast ring transition to CPL 0.
   The kernel CS selector comes from MSR_STAR[47:32]; SS is CS + 8 by
   architectural convention. In long mode the return RIP goes to RCX and
   RFLAGS to R11, and the entry point is LSTAR (64-bit caller) or CSTAR
   (compatibility-mode caller); in legacy mode the entry point is the
   low 32 bits of STAR. */
void helper_syscall(int next_eip_addend)
{
    int selector;

    /* SYSCALL is an invalid opcode unless EFER.SCE is set */
    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;   /* RCX <- return RIP */
        env->regs[11] = compute_eflags();   /* R11 <- RFLAGS */

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        /* flat 64-bit code segment (L bit set) */
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        /* FMASK selects which RFLAGS bits are cleared on entry */
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        /* legacy mode: only EIP is saved (in ECX); no R11 */
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
1162#endif
d9957a8b 1163#endif
eaa728ee 1164
d9957a8b 1165#ifdef TARGET_X86_64
eaa728ee
FB
/* SYSRET: return from a SYSCALL handler to CPL 3.
   The user CS base selector comes from MSR_STAR[63:48]; the actual CS is
   that value (dflag==2, 64-bit return: +16) OR'ed with RPL 3, and SS is
   base+8. In long mode RIP/RFLAGS are restored from RCX/R11; in legacy
   mode only EIP from ECX, and IF is simply set. */
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    /* #GP unless in protected mode at CPL 0 */
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            /* return to 64-bit code: CS = STAR[63:48] + 16, RPL 3 */
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            /* return to 32-bit compatibility code */
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        /* restore user RFLAGS from R11, keeping reserved bits */
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        /* legacy SYSRET re-enables interrupts unconditionally */
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
d9957a8b 1219#endif
eaa728ee
FB
1220
1221/* real mode interrupt */
/* Deliver an interrupt/exception in real mode: read the 4-byte IVT
   entry (offset:segment), push FLAGS/CS/IP (16-bit) on the stack, and
   jump to the handler. is_int selects whether next_eip (after INT n) or
   the current EIP is saved as the return address. */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    /* each IVT entry is 4 bytes */
    if (intno * 4 + 3 > dt->limit)
        /* NOTE(review): error code uses intno * 8 + 2 (protected-mode
           IDT scaling) while the limit check uses * 4 — confirm intended */
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state: only the low 16 bits of ESP change */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
1257
e694d4e2 1258#if defined(CONFIG_USER_ONLY)
eaa728ee 1259/* fake user mode interrupt */
e694d4e2
BS
/* Fake interrupt delivery for user-mode emulation: perform only the
   privilege check against the IDT gate DPL (so INT n from CPL 3 through
   a DPL<3 gate still faults), then leave the exception pending for the
   outer cpu loop. IDT entries are 16 bytes in long mode, 8 otherwise. */
static void do_interrupt_user(int intno, int is_int, int error_code,
                              target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
1289
e694d4e2
BS
1290#else
1291
2ed51f5b
AL
/* SVM event injection bookkeeping: if the guest's VMCB does not already
   carry a valid pending event, record this interrupt/exception in
   control.event_inj so the hypervisor sees it on #VMEXIT. rm is nonzero
   when delivering in real mode, where no error code is pushed.
   NOTE: exeption_has_error_code() is the (misspelled) helper declared
   earlier in this file. */
static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exeption_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
00ea18d1 1310#endif
2ed51f5b 1311
eaa728ee
FB
1312/*
1313 * Begin execution of an interruption. is_int is TRUE if coming from
1314 * the int instruction. next_eip is the EIP value AFTER the interrupt
1315 * instruction. It is only relevant if is_int is TRUE.
1316 */
e694d4e2
BS
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 *
 * Dispatches to the long-mode / protected-mode / real-mode delivery
 * routine based on CR0.PE and EFER.LMA, with SVM event-injection
 * bookkeeping around the delivery when running under a hypervisor.
 */
static void do_interrupt_all(int intno, int is_int, int error_code,
                             target_ulong next_eip, int is_hw)
{
    /* optional interrupt trace (enabled with -d int) */
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                /* page fault: show the faulting address */
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        /* under SVM, latch the event into the VMCB before delivery */
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    /* delivery succeeded: clear the pending-event flag in the VMCB */
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
1379
e694d4e2
BS
/* Deliver the exception currently pending in env1 (exception_index,
   error_code, ...). Temporarily installs env1 as the global env,
   since the helpers below operate on the global. */
void do_interrupt(CPUState *env1)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(env->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
    env = saved_env;
}
1409
1410void do_interrupt_x86_hardirq(CPUState *env1, int intno, int is_hw)
1411{
1412 CPUState *saved_env;
1413
1414 saved_env = env;
1415 env = env1;
1416 do_interrupt_all(intno, 0, 0, 0, is_hw);
1417 env = saved_env;
1418}
1419
f55761a0
AL
1420/* This should come from sysemu.h - if we could include it here... */
1421void qemu_system_reset_request(void);
1422
eaa728ee
FB
1423/*
1424 * Check nested exceptions and change to double or triple fault if
1425 * needed. It should only be called, if this is not an interrupt.
1426 * Returns the new exception number.
1427 */
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called, if this is not an interrupt.
 * Returns the new exception number.
 *
 * "Contributory" exceptions are #DE (0) and vectors 10-13; two
 * contributory faults (or page-fault combinations) escalate to #DF,
 * and a fault while delivering #DF is a triple fault -> system reset.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        /* under SVM, shutdown exits to the hypervisor instead */
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    /* remember only exceptions that can participate in escalation */
    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
1464
1465/*
1466 * Signal an interruption. It is executed in the main CPU loop.
1467 * is_int is TRUE if coming from the int instruction. next_eip is the
1468 * EIP value AFTER the interrupt instruction. It is only relevant if
1469 * is_int is TRUE.
1470 */
a5e50b26 1471static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1472 int next_eip_addend)
eaa728ee
FB
1473{
1474 if (!is_int) {
1475 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1476 intno = check_exception(intno, &error_code);
872929aa
FB
1477 } else {
1478 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
eaa728ee
FB
1479 }
1480
1481 env->exception_index = intno;
1482 env->error_code = error_code;
1483 env->exception_is_int = is_int;
1484 env->exception_next_eip = env->eip + next_eip_addend;
1162c041 1485 cpu_loop_exit(env);
eaa728ee
FB
1486}
1487
eaa728ee
FB
1488/* shortcuts to generate exceptions */
1489
3e457172
BS
/* Raise an exception (not a software interrupt) with an error code;
   never returns. */
static void QEMU_NORETURN raise_exception_err(int exception_index,
                                              int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
1495
/* Externally-callable variant of raise_exception_err: installs nenv as
   the global env first (callers outside this file don't own the global).
   Never returns. */
void raise_exception_err_env(CPUState *nenv, int exception_index,
                             int error_code)
{
    env = nenv;
    raise_interrupt(exception_index, 0, error_code, 0);
}
1502
3e457172 1503static void QEMU_NORETURN raise_exception(int exception_index)
eaa728ee
FB
1504{
1505 raise_interrupt(exception_index, 0, 0, 0);
1506}
1507
63a54736
JW
/* Externally-callable variant of raise_exception: installs nenv as the
   global env first. Never returns. */
void raise_exception_env(int exception_index, CPUState *nenv)
{
    env = nenv;
    raise_exception(exception_index);
}
eaa728ee
FB
1513/* SMM support */
1514
1515#if defined(CONFIG_USER_ONLY)
1516
e694d4e2 1517void do_smm_enter(CPUState *env1)
eaa728ee
FB
1518{
1519}
1520
/* RSM is a no-op in user-mode emulation (no SMRAM). */
void helper_rsm(void)
{
}
1524
1525#else
1526
1527#ifdef TARGET_X86_64
1528#define SMM_REVISION_ID 0x00020064
1529#else
1530#define SMM_REVISION_ID 0x00020000
1531#endif
1532
e694d4e2 1533void do_smm_enter(CPUState *env1)
eaa728ee
FB
1534{
1535 target_ulong sm_state;
1536 SegmentCache *dt;
1537 int i, offset;
e694d4e2
BS
1538 CPUState *saved_env;
1539
1540 saved_env = env;
1541 env = env1;
eaa728ee 1542
93fcfe39
AL
1543 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1544 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
eaa728ee
FB
1545
1546 env->hflags |= HF_SMM_MASK;
1547 cpu_smm_update(env);
1548
1549 sm_state = env->smbase + 0x8000;
1550
1551#ifdef TARGET_X86_64
1552 for(i = 0; i < 6; i++) {
1553 dt = &env->segs[i];
1554 offset = 0x7e00 + i * 16;
1555 stw_phys(sm_state + offset, dt->selector);
1556 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1557 stl_phys(sm_state + offset + 4, dt->limit);
1558 stq_phys(sm_state + offset + 8, dt->base);
1559 }
1560
1561 stq_phys(sm_state + 0x7e68, env->gdt.base);
1562 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1563
1564 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1565 stq_phys(sm_state + 0x7e78, env->ldt.base);
1566 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1567 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1568
1569 stq_phys(sm_state + 0x7e88, env->idt.base);
1570 stl_phys(sm_state + 0x7e84, env->idt.limit);
1571
1572 stw_phys(sm_state + 0x7e90, env->tr.selector);
1573 stq_phys(sm_state + 0x7e98, env->tr.base);
1574 stl_phys(sm_state + 0x7e94, env->tr.limit);
1575 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1576
1577 stq_phys(sm_state + 0x7ed0, env->efer);
1578
1579 stq_phys(sm_state + 0x7ff8, EAX);
1580 stq_phys(sm_state + 0x7ff0, ECX);
1581 stq_phys(sm_state + 0x7fe8, EDX);
1582 stq_phys(sm_state + 0x7fe0, EBX);
1583 stq_phys(sm_state + 0x7fd8, ESP);
1584 stq_phys(sm_state + 0x7fd0, EBP);
1585 stq_phys(sm_state + 0x7fc8, ESI);
1586 stq_phys(sm_state + 0x7fc0, EDI);
1587 for(i = 8; i < 16; i++)
1588 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1589 stq_phys(sm_state + 0x7f78, env->eip);
1590 stl_phys(sm_state + 0x7f70, compute_eflags());
1591 stl_phys(sm_state + 0x7f68, env->dr[6]);
1592 stl_phys(sm_state + 0x7f60, env->dr[7]);
1593
1594 stl_phys(sm_state + 0x7f48, env->cr[4]);
1595 stl_phys(sm_state + 0x7f50, env->cr[3]);
1596 stl_phys(sm_state + 0x7f58, env->cr[0]);
1597
1598 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1599 stl_phys(sm_state + 0x7f00, env->smbase);
1600#else
1601 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1602 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1603 stl_phys(sm_state + 0x7ff4, compute_eflags());
1604 stl_phys(sm_state + 0x7ff0, env->eip);
1605 stl_phys(sm_state + 0x7fec, EDI);
1606 stl_phys(sm_state + 0x7fe8, ESI);
1607 stl_phys(sm_state + 0x7fe4, EBP);
1608 stl_phys(sm_state + 0x7fe0, ESP);
1609 stl_phys(sm_state + 0x7fdc, EBX);
1610 stl_phys(sm_state + 0x7fd8, EDX);
1611 stl_phys(sm_state + 0x7fd4, ECX);
1612 stl_phys(sm_state + 0x7fd0, EAX);
1613 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1614 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1615
1616 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1617 stl_phys(sm_state + 0x7f64, env->tr.base);
1618 stl_phys(sm_state + 0x7f60, env->tr.limit);
1619 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1620
1621 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1622 stl_phys(sm_state + 0x7f80, env->ldt.base);
1623 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1624 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1625
1626 stl_phys(sm_state + 0x7f74, env->gdt.base);
1627 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1628
1629 stl_phys(sm_state + 0x7f58, env->idt.base);
1630 stl_phys(sm_state + 0x7f54, env->idt.limit);
1631
1632 for(i = 0; i < 6; i++) {
1633 dt = &env->segs[i];
1634 if (i < 3)
1635 offset = 0x7f84 + i * 12;
1636 else
1637 offset = 0x7f2c + (i - 3) * 12;
1638 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1639 stl_phys(sm_state + offset + 8, dt->base);
1640 stl_phys(sm_state + offset + 4, dt->limit);
1641 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1642 }
1643 stl_phys(sm_state + 0x7f14, env->cr[4]);
1644
1645 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1646 stl_phys(sm_state + 0x7ef8, env->smbase);
1647#endif
1648 /* init SMM cpu state */
1649
1650#ifdef TARGET_X86_64
5efc27bb 1651 cpu_load_efer(env, 0);
eaa728ee
FB
1652#endif
1653 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1654 env->eip = 0x00008000;
1655 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1656 0xffffffff, 0);
1657 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1658 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1659 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1660 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1661 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1662
1663 cpu_x86_update_cr0(env,
1664 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1665 cpu_x86_update_cr4(env, 0);
1666 env->dr[7] = 0x00000400;
1667 CC_OP = CC_OP_EFLAGS;
e694d4e2 1668 env = saved_env;
eaa728ee
FB
1669}
1670
/* RSM: leave System Management Mode by restoring the CPU state saved in
   the SMRAM state-save area at smbase+0x8000. The offsets must match
   do_smm_enter() above. A save image with bit 17 of the revision ID set
   may also relocate SMBASE. */
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        /* SMBASE relocation supported by this save image */
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}
1788
1789#endif /* !CONFIG_USER_ONLY */
1790
1791
1792/* division, flags are undefined */
1793
1794void helper_divb_AL(target_ulong t0)
1795{
1796 unsigned int num, den, q, r;
1797
1798 num = (EAX & 0xffff);
1799 den = (t0 & 0xff);
1800 if (den == 0) {
1801 raise_exception(EXCP00_DIVZ);
1802 }
1803 q = (num / den);
1804 if (q > 0xff)
1805 raise_exception(EXCP00_DIVZ);
1806 q &= 0xff;
1807 r = (num % den) & 0xff;
1808 EAX = (EAX & ~0xffff) | (r << 8) | q;
1809}
1810
1811void helper_idivb_AL(target_ulong t0)
1812{
1813 int num, den, q, r;
1814
1815 num = (int16_t)EAX;
1816 den = (int8_t)t0;
1817 if (den == 0) {
1818 raise_exception(EXCP00_DIVZ);
1819 }
1820 q = (num / den);
1821 if (q != (int8_t)q)
1822 raise_exception(EXCP00_DIVZ);
1823 q &= 0xff;
1824 r = (num % den) & 0xff;
1825 EAX = (EAX & ~0xffff) | (r << 8) | q;
1826}
1827
1828void helper_divw_AX(target_ulong t0)
1829{
1830 unsigned int num, den, q, r;
1831
1832 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1833 den = (t0 & 0xffff);
1834 if (den == 0) {
1835 raise_exception(EXCP00_DIVZ);
1836 }
1837 q = (num / den);
1838 if (q > 0xffff)
1839 raise_exception(EXCP00_DIVZ);
1840 q &= 0xffff;
1841 r = (num % den) & 0xffff;
1842 EAX = (EAX & ~0xffff) | q;
1843 EDX = (EDX & ~0xffff) | r;
1844}
1845
1846void helper_idivw_AX(target_ulong t0)
1847{
1848 int num, den, q, r;
1849
1850 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1851 den = (int16_t)t0;
1852 if (den == 0) {
1853 raise_exception(EXCP00_DIVZ);
1854 }
1855 q = (num / den);
1856 if (q != (int16_t)q)
1857 raise_exception(EXCP00_DIVZ);
1858 q &= 0xffff;
1859 r = (num % den) & 0xffff;
1860 EAX = (EAX & ~0xffff) | q;
1861 EDX = (EDX & ~0xffff) | r;
1862}
1863
1864void helper_divl_EAX(target_ulong t0)
1865{
1866 unsigned int den, r;
1867 uint64_t num, q;
1868
1869 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1870 den = t0;
1871 if (den == 0) {
1872 raise_exception(EXCP00_DIVZ);
1873 }
1874 q = (num / den);
1875 r = (num % den);
1876 if (q > 0xffffffff)
1877 raise_exception(EXCP00_DIVZ);
1878 EAX = (uint32_t)q;
1879 EDX = (uint32_t)r;
1880}
1881
1882void helper_idivl_EAX(target_ulong t0)
1883{
1884 int den, r;
1885 int64_t num, q;
1886
1887 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1888 den = t0;
1889 if (den == 0) {
1890 raise_exception(EXCP00_DIVZ);
1891 }
1892 q = (num / den);
1893 r = (num % den);
1894 if (q != (int32_t)q)
1895 raise_exception(EXCP00_DIVZ);
1896 EAX = (uint32_t)q;
1897 EDX = (uint32_t)r;
1898}
1899
1900/* bcd */
1901
1902/* XXX: exception */
1903void helper_aam(int base)
1904{
1905 int al, ah;
1906 al = EAX & 0xff;
1907 ah = al / base;
1908 al = al % base;
1909 EAX = (EAX & ~0xffff) | al | (ah << 8);
1910 CC_DST = al;
1911}
1912
1913void helper_aad(int base)
1914{
1915 int al, ah;
1916 al = EAX & 0xff;
1917 ah = (EAX >> 8) & 0xff;
1918 al = ((ah * base) + al) & 0xff;
1919 EAX = (EAX & ~0xffff) | al;
1920 CC_DST = al;
1921}
1922
1923void helper_aaa(void)
1924{
1925 int icarry;
1926 int al, ah, af;
1927 int eflags;
1928
a7812ae4 1929 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1930 af = eflags & CC_A;
1931 al = EAX & 0xff;
1932 ah = (EAX >> 8) & 0xff;
1933
1934 icarry = (al > 0xf9);
1935 if (((al & 0x0f) > 9 ) || af) {
1936 al = (al + 6) & 0x0f;
1937 ah = (ah + 1 + icarry) & 0xff;
1938 eflags |= CC_C | CC_A;
1939 } else {
1940 eflags &= ~(CC_C | CC_A);
1941 al &= 0x0f;
1942 }
1943 EAX = (EAX & ~0xffff) | al | (ah << 8);
1944 CC_SRC = eflags;
eaa728ee
FB
1945}
1946
1947void helper_aas(void)
1948{
1949 int icarry;
1950 int al, ah, af;
1951 int eflags;
1952
a7812ae4 1953 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1954 af = eflags & CC_A;
1955 al = EAX & 0xff;
1956 ah = (EAX >> 8) & 0xff;
1957
1958 icarry = (al < 6);
1959 if (((al & 0x0f) > 9 ) || af) {
1960 al = (al - 6) & 0x0f;
1961 ah = (ah - 1 - icarry) & 0xff;
1962 eflags |= CC_C | CC_A;
1963 } else {
1964 eflags &= ~(CC_C | CC_A);
1965 al &= 0x0f;
1966 }
1967 EAX = (EAX & ~0xffff) | al | (ah << 8);
1968 CC_SRC = eflags;
eaa728ee
FB
1969}
1970
1971void helper_daa(void)
1972{
c6bfc164 1973 int old_al, al, af, cf;
eaa728ee
FB
1974 int eflags;
1975
a7812ae4 1976 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1977 cf = eflags & CC_C;
1978 af = eflags & CC_A;
c6bfc164 1979 old_al = al = EAX & 0xff;
eaa728ee
FB
1980
1981 eflags = 0;
1982 if (((al & 0x0f) > 9 ) || af) {
1983 al = (al + 6) & 0xff;
1984 eflags |= CC_A;
1985 }
c6bfc164 1986 if ((old_al > 0x99) || cf) {
eaa728ee
FB
1987 al = (al + 0x60) & 0xff;
1988 eflags |= CC_C;
1989 }
1990 EAX = (EAX & ~0xff) | al;
1991 /* well, speed is not an issue here, so we compute the flags by hand */
1992 eflags |= (al == 0) << 6; /* zf */
1993 eflags |= parity_table[al]; /* pf */
1994 eflags |= (al & 0x80); /* sf */
1995 CC_SRC = eflags;
eaa728ee
FB
1996}
1997
/* DAS: decimal adjust AL after subtraction. Mirrors DAA with borrows;
   the 0x99 comparison uses the pre-adjustment AL (al1), and CF is also
   set when the low-nibble fixup itself borrows (al < 6). ZF/PF/SF are
   recomputed by hand into CC_SRC. */
void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
2027
/* INTO: raise the overflow interrupt (vector 4) if OF is set; it is
   delivered as a software interrupt (is_int=1) resuming after INTO. */
void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
2036
/* CMPXCHG8B m64: compare EDX:EAX with the memory qword; on match store
   ECX:EBX and set ZF, otherwise load EDX:EAX from memory and clear ZF.
   The store is performed on both paths, matching the architectural
   unconditional write (keeps MMU/dirty behavior consistent). */
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
2056
2057#ifdef TARGET_X86_64
/* CMPXCHG16B m128: compare RDX:RAX with the 16-byte memory operand; on
   match store RCX:RBX and set ZF, otherwise load RDX:RAX and clear ZF.
   The operand must be 16-byte aligned (#GP otherwise), and the store is
   performed on both paths like cmpxchg8b above. */
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
2082#endif
2083
/* Raise a #DB single-step trap; in system emulation, also evaluate
   hardware breakpoints and set DR6.BS first. Never returns. */
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}
2092
/* CPUID: after the SVM intercept check, query the leaf selected by
   EAX (and sub-leaf ECX) and write the results back to EAX..EDX. */
void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}
2105
2106void helper_enter_level(int level, int data32, target_ulong t1)
2107{
2108 target_ulong ssp;
2109 uint32_t esp_mask, esp, ebp;
2110
2111 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2112 ssp = env->segs[R_SS].base;
2113 ebp = EBP;
2114 esp = ESP;
2115 if (data32) {
2116 /* 32 bit */
2117 esp -= 4;
2118 while (--level) {
2119 esp -= 4;
2120 ebp -= 4;
2121 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2122 }
2123 esp -= 4;
2124 stl(ssp + (esp & esp_mask), t1);
2125 } else {
2126 /* 16 bit */
2127 esp -= 2;
2128 while (--level) {
2129 esp -= 2;
2130 ebp -= 2;
2131 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2132 }
2133 esp -= 2;
2134 stw(ssp + (esp & esp_mask), t1);
2135 }
2136}
2137
2138#ifdef TARGET_X86_64
2139void helper_enter64_level(int level, int data64, target_ulong t1)
2140{
2141 target_ulong esp, ebp;
2142 ebp = EBP;
2143 esp = ESP;
2144
2145 if (data64) {
2146 /* 64 bit */
2147 esp -= 8;
2148 while (--level) {
2149 esp -= 8;
2150 ebp -= 8;
2151 stq(esp, ldq(ebp));
2152 }
2153 esp -= 8;
2154 stq(esp, t1);
2155 } else {
2156 /* 16 bit */
2157 esp -= 2;
2158 while (--level) {
2159 esp -= 2;
2160 ebp -= 2;
2161 stw(esp, lduw(ebp));
2162 }
2163 esp -= 2;
2164 stw(esp, t1);
2165 }
2166}
2167#endif
2168
2169void helper_lldt(int selector)
2170{
2171 SegmentCache *dt;
2172 uint32_t e1, e2;
2173 int index, entry_limit;
2174 target_ulong ptr;
2175
2176 selector &= 0xffff;
2177 if ((selector & 0xfffc) == 0) {
2178 /* XXX: NULL selector case: invalid LDT */
2179 env->ldt.base = 0;
2180 env->ldt.limit = 0;
2181 } else {
2182 if (selector & 0x4)
2183 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2184 dt = &env->gdt;
2185 index = selector & ~7;
2186#ifdef TARGET_X86_64
2187 if (env->hflags & HF_LMA_MASK)
2188 entry_limit = 15;
2189 else
2190#endif
2191 entry_limit = 7;
2192 if ((index + entry_limit) > dt->limit)
2193 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2194 ptr = dt->base + index;
2195 e1 = ldl_kernel(ptr);
2196 e2 = ldl_kernel(ptr + 4);
2197 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2198 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2199 if (!(e2 & DESC_P_MASK))
2200 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2201#ifdef TARGET_X86_64
2202 if (env->hflags & HF_LMA_MASK) {
2203 uint32_t e3;
2204 e3 = ldl_kernel(ptr + 8);
2205 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2206 env->ldt.base |= (target_ulong)e3 << 32;
2207 } else
2208#endif
2209 {
2210 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2211 }
2212 }
2213 env->ldt.selector = selector;
2214}
2215
2216void helper_ltr(int selector)
2217{
2218 SegmentCache *dt;
2219 uint32_t e1, e2;
2220 int index, type, entry_limit;
2221 target_ulong ptr;
2222
2223 selector &= 0xffff;
2224 if ((selector & 0xfffc) == 0) {
2225 /* NULL selector case: invalid TR */
2226 env->tr.base = 0;
2227 env->tr.limit = 0;
2228 env->tr.flags = 0;
2229 } else {
2230 if (selector & 0x4)
2231 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2232 dt = &env->gdt;
2233 index = selector & ~7;
2234#ifdef TARGET_X86_64
2235 if (env->hflags & HF_LMA_MASK)
2236 entry_limit = 15;
2237 else
2238#endif
2239 entry_limit = 7;
2240 if ((index + entry_limit) > dt->limit)
2241 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2242 ptr = dt->base + index;
2243 e1 = ldl_kernel(ptr);
2244 e2 = ldl_kernel(ptr + 4);
2245 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2246 if ((e2 & DESC_S_MASK) ||
2247 (type != 1 && type != 9))
2248 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2249 if (!(e2 & DESC_P_MASK))
2250 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2251#ifdef TARGET_X86_64
2252 if (env->hflags & HF_LMA_MASK) {
2253 uint32_t e3, e4;
2254 e3 = ldl_kernel(ptr + 8);
2255 e4 = ldl_kernel(ptr + 12);
2256 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2257 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2258 load_seg_cache_raw_dt(&env->tr, e1, e2);
2259 env->tr.base |= (target_ulong)e3 << 32;
2260 } else
2261#endif
2262 {
2263 load_seg_cache_raw_dt(&env->tr, e1, e2);
2264 }
2265 e2 |= DESC_TSS_BUSY_MASK;
2266 stl_kernel(ptr + 4, e2);
2267 }
2268 env->tr.selector = selector;
2269}
2270
2271/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2272void helper_load_seg(int seg_reg, int selector)
2273{
2274 uint32_t e1, e2;
2275 int cpl, dpl, rpl;
2276 SegmentCache *dt;
2277 int index;
2278 target_ulong ptr;
2279
2280 selector &= 0xffff;
2281 cpl = env->hflags & HF_CPL_MASK;
2282 if ((selector & 0xfffc) == 0) {
2283 /* null selector case */
2284 if (seg_reg == R_SS
2285#ifdef TARGET_X86_64
2286 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2287#endif
2288 )
2289 raise_exception_err(EXCP0D_GPF, 0);
2290 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2291 } else {
2292
2293 if (selector & 0x4)
2294 dt = &env->ldt;
2295 else
2296 dt = &env->gdt;
2297 index = selector & ~7;
2298 if ((index + 7) > dt->limit)
2299 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2300 ptr = dt->base + index;
2301 e1 = ldl_kernel(ptr);
2302 e2 = ldl_kernel(ptr + 4);
2303
2304 if (!(e2 & DESC_S_MASK))
2305 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2306 rpl = selector & 3;
2307 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2308 if (seg_reg == R_SS) {
2309 /* must be writable segment */
2310 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2311 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2312 if (rpl != cpl || dpl != cpl)
2313 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2314 } else {
2315 /* must be readable segment */
2316 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2317 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2318
2319 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2320 /* if not conforming code, test rights */
2321 if (dpl < cpl || dpl < rpl)
2322 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2323 }
2324 }
2325
2326 if (!(e2 & DESC_P_MASK)) {
2327 if (seg_reg == R_SS)
2328 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2329 else
2330 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2331 }
2332
2333 /* set the access bit if not already set */
2334 if (!(e2 & DESC_A_MASK)) {
2335 e2 |= DESC_A_MASK;
2336 stl_kernel(ptr + 4, e2);
2337 }
2338
2339 cpu_x86_load_seg_cache(env, seg_reg, selector,
2340 get_seg_base(e1, e2),
2341 get_seg_limit(e1, e2),
2342 e2);
2343#if 0
93fcfe39 2344 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
eaa728ee
FB
2345 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2346#endif
2347 }
2348}
2349
2350/* protected mode jump */
2351void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2352 int next_eip_addend)
2353{
2354 int gate_cs, type;
2355 uint32_t e1, e2, cpl, dpl, rpl, limit;
2356 target_ulong next_eip;
2357
2358 if ((new_cs & 0xfffc) == 0)
2359 raise_exception_err(EXCP0D_GPF, 0);
2360 if (load_segment(&e1, &e2, new_cs) != 0)
2361 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2362 cpl = env->hflags & HF_CPL_MASK;
2363 if (e2 & DESC_S_MASK) {
2364 if (!(e2 & DESC_CS_MASK))
2365 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2366 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2367 if (e2 & DESC_C_MASK) {
2368 /* conforming code segment */
2369 if (dpl > cpl)
2370 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2371 } else {
2372 /* non conforming code segment */
2373 rpl = new_cs & 3;
2374 if (rpl > cpl)
2375 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2376 if (dpl != cpl)
2377 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2378 }
2379 if (!(e2 & DESC_P_MASK))
2380 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2381 limit = get_seg_limit(e1, e2);
2382 if (new_eip > limit &&
2383 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2384 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2385 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2386 get_seg_base(e1, e2), limit, e2);
2387 EIP = new_eip;
2388 } else {
2389 /* jump to call or task gate */
2390 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2391 rpl = new_cs & 3;
2392 cpl = env->hflags & HF_CPL_MASK;
2393 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2394 switch(type) {
2395 case 1: /* 286 TSS */
2396 case 9: /* 386 TSS */
2397 case 5: /* task gate */
2398 if (dpl < cpl || dpl < rpl)
2399 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2400 next_eip = env->eip + next_eip_addend;
2401 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2402 CC_OP = CC_OP_EFLAGS;
2403 break;
2404 case 4: /* 286 call gate */
2405 case 12: /* 386 call gate */
2406 if ((dpl < cpl) || (dpl < rpl))
2407 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2408 if (!(e2 & DESC_P_MASK))
2409 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2410 gate_cs = e1 >> 16;
2411 new_eip = (e1 & 0xffff);
2412 if (type == 12)
2413 new_eip |= (e2 & 0xffff0000);
2414 if (load_segment(&e1, &e2, gate_cs) != 0)
2415 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2416 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2417 /* must be code segment */
2418 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2419 (DESC_S_MASK | DESC_CS_MASK)))
2420 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2421 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2422 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2423 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2424 if (!(e2 & DESC_P_MASK))
2425 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2426 limit = get_seg_limit(e1, e2);
2427 if (new_eip > limit)
2428 raise_exception_err(EXCP0D_GPF, 0);
2429 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2430 get_seg_base(e1, e2), limit, e2);
2431 EIP = new_eip;
2432 break;
2433 default:
2434 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2435 break;
2436 }
2437 }
2438}
2439
2440/* real mode call */
2441void helper_lcall_real(int new_cs, target_ulong new_eip1,
2442 int shift, int next_eip)
2443{
2444 int new_eip;
2445 uint32_t esp, esp_mask;
2446 target_ulong ssp;
2447
2448 new_eip = new_eip1;
2449 esp = ESP;
2450 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2451 ssp = env->segs[R_SS].base;
2452 if (shift) {
2453 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2454 PUSHL(ssp, esp, esp_mask, next_eip);
2455 } else {
2456 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2457 PUSHW(ssp, esp, esp_mask, next_eip);
2458 }
2459
2460 SET_ESP(esp, esp_mask);
2461 env->eip = new_eip;
2462 env->segs[R_CS].selector = new_cs;
2463 env->segs[R_CS].base = (new_cs << 4);
2464}
2465
2466/* protected mode call */
2467void helper_lcall_protected(int new_cs, target_ulong new_eip,
2468 int shift, int next_eip_addend)
2469{
2470 int new_stack, i;
2471 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1c918eba 2472 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
eaa728ee
FB
2473 uint32_t val, limit, old_sp_mask;
2474 target_ulong ssp, old_ssp, next_eip;
2475
2476 next_eip = env->eip + next_eip_addend;
d12d51d5
AL
2477 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2478 LOG_PCALL_STATE(env);
eaa728ee
FB
2479 if ((new_cs & 0xfffc) == 0)
2480 raise_exception_err(EXCP0D_GPF, 0);
2481 if (load_segment(&e1, &e2, new_cs) != 0)
2482 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2483 cpl = env->hflags & HF_CPL_MASK;
d12d51d5 2484 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
eaa728ee
FB
2485 if (e2 & DESC_S_MASK) {
2486 if (!(e2 & DESC_CS_MASK))
2487 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2488 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2489 if (e2 & DESC_C_MASK) {
2490 /* conforming code segment */
2491 if (dpl > cpl)
2492 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2493 } else {
2494 /* non conforming code segment */
2495 rpl = new_cs & 3;
2496 if (rpl > cpl)
2497 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2498 if (dpl != cpl)
2499 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2500 }
2501 if (!(e2 & DESC_P_MASK))
2502 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2503
2504#ifdef TARGET_X86_64
2505 /* XXX: check 16/32 bit cases in long mode */
2506 if (shift == 2) {
2507 target_ulong rsp;
2508 /* 64 bit case */
2509 rsp = ESP;
2510 PUSHQ(rsp, env->segs[R_CS].selector);
2511 PUSHQ(rsp, next_eip);
2512 /* from this point, not restartable */
2513 ESP = rsp;
2514 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2515 get_seg_base(e1, e2),
2516 get_seg_limit(e1, e2), e2);
2517 EIP = new_eip;
2518 } else
2519#endif
2520 {
2521 sp = ESP;
2522 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2523 ssp = env->segs[R_SS].base;
2524 if (shift) {
2525 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2526 PUSHL(ssp, sp, sp_mask, next_eip);
2527 } else {
2528 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2529 PUSHW(ssp, sp, sp_mask, next_eip);
2530 }
2531
2532 limit = get_seg_limit(e1, e2);
2533 if (new_eip > limit)
2534 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2535 /* from this point, not restartable */
2536 SET_ESP(sp, sp_mask);
2537 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2538 get_seg_base(e1, e2), limit, e2);
2539 EIP = new_eip;
2540 }
2541 } else {
2542 /* check gate type */
2543 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2544 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2545 rpl = new_cs & 3;
2546 switch(type) {
2547 case 1: /* available 286 TSS */
2548 case 9: /* available 386 TSS */
2549 case 5: /* task gate */
2550 if (dpl < cpl || dpl < rpl)
2551 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2552 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2553 CC_OP = CC_OP_EFLAGS;
2554 return;
2555 case 4: /* 286 call gate */
2556 case 12: /* 386 call gate */
2557 break;
2558 default:
2559 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2560 break;
2561 }
2562 shift = type >> 3;
2563
2564 if (dpl < cpl || dpl < rpl)
2565 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2566 /* check valid bit */
2567 if (!(e2 & DESC_P_MASK))
2568 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2569 selector = e1 >> 16;
2570 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2571 param_count = e2 & 0x1f;
2572 if ((selector & 0xfffc) == 0)
2573 raise_exception_err(EXCP0D_GPF, 0);
2574
2575 if (load_segment(&e1, &e2, selector) != 0)
2576 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2577 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2578 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2579 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2580 if (dpl > cpl)
2581 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2582 if (!(e2 & DESC_P_MASK))
2583 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2584
2585 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2586 /* to inner privilege */
2587 get_ss_esp_from_tss(&ss, &sp, dpl);
d12d51d5 2588 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
eaa728ee 2589 ss, sp, param_count, ESP);
eaa728ee
FB
2590 if ((ss & 0xfffc) == 0)
2591 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2592 if ((ss & 3) != dpl)
2593 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2594 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2595 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2596 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2597 if (ss_dpl != dpl)
2598 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2599 if (!(ss_e2 & DESC_S_MASK) ||
2600 (ss_e2 & DESC_CS_MASK) ||
2601 !(ss_e2 & DESC_W_MASK))
2602 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2603 if (!(ss_e2 & DESC_P_MASK))
2604 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2605
2606 // push_size = ((param_count * 2) + 8) << shift;
2607
2608 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2609 old_ssp = env->segs[R_SS].base;
2610
2611 sp_mask = get_sp_mask(ss_e2);
2612 ssp = get_seg_base(ss_e1, ss_e2);
2613 if (shift) {
2614 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2615 PUSHL(ssp, sp, sp_mask, ESP);
2616 for(i = param_count - 1; i >= 0; i--) {
2617 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2618 PUSHL(ssp, sp, sp_mask, val);
2619 }
2620 } else {
2621 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2622 PUSHW(ssp, sp, sp_mask, ESP);
2623 for(i = param_count - 1; i >= 0; i--) {
2624 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2625 PUSHW(ssp, sp, sp_mask, val);
2626 }
2627 }
2628 new_stack = 1;
2629 } else {
2630 /* to same privilege */
2631 sp = ESP;
2632 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2633 ssp = env->segs[R_SS].base;
2634 // push_size = (4 << shift);
2635 new_stack = 0;
2636 }
2637
2638 if (shift) {
2639 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2640 PUSHL(ssp, sp, sp_mask, next_eip);
2641 } else {
2642 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2643 PUSHW(ssp, sp, sp_mask, next_eip);
2644 }
2645
2646 /* from this point, not restartable */
2647
2648 if (new_stack) {
2649 ss = (ss & ~3) | dpl;
2650 cpu_x86_load_seg_cache(env, R_SS, ss,
2651 ssp,
2652 get_seg_limit(ss_e1, ss_e2),
2653 ss_e2);
2654 }
2655
2656 selector = (selector & ~3) | dpl;
2657 cpu_x86_load_seg_cache(env, R_CS, selector,
2658 get_seg_base(e1, e2),
2659 get_seg_limit(e1, e2),
2660 e2);
2661 cpu_x86_set_cpl(env, dpl);
2662 SET_ESP(sp, sp_mask);
2663 EIP = offset;
2664 }
eaa728ee
FB
2665}
2666
2667/* real and vm86 mode iret */
2668void helper_iret_real(int shift)
2669{
2670 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2671 target_ulong ssp;
2672 int eflags_mask;
2673
2674 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2675 sp = ESP;
2676 ssp = env->segs[R_SS].base;
2677 if (shift == 1) {
2678 /* 32 bits */
2679 POPL(ssp, sp, sp_mask, new_eip);
2680 POPL(ssp, sp, sp_mask, new_cs);
2681 new_cs &= 0xffff;
2682 POPL(ssp, sp, sp_mask, new_eflags);
2683 } else {
2684 /* 16 bits */
2685 POPW(ssp, sp, sp_mask, new_eip);
2686 POPW(ssp, sp, sp_mask, new_cs);
2687 POPW(ssp, sp, sp_mask, new_eflags);
2688 }
2689 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
bdadc0b5 2690 env->segs[R_CS].selector = new_cs;
2691 env->segs[R_CS].base = (new_cs << 4);
eaa728ee
FB
2692 env->eip = new_eip;
2693 if (env->eflags & VM_MASK)
2694 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2695 else
2696 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2697 if (shift == 0)
2698 eflags_mask &= 0xffff;
2699 load_eflags(new_eflags, eflags_mask);
db620f46 2700 env->hflags2 &= ~HF2_NMI_MASK;
eaa728ee
FB
2701}
2702
2703static inline void validate_seg(int seg_reg, int cpl)
2704{
2705 int dpl;
2706 uint32_t e2;
2707
2708 /* XXX: on x86_64, we do not want to nullify FS and GS because
2709 they may still contain a valid base. I would be interested to
2710 know how a real x86_64 CPU behaves */
2711 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2712 (env->segs[seg_reg].selector & 0xfffc) == 0)
2713 return;
2714
2715 e2 = env->segs[seg_reg].flags;
2716 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2717 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2718 /* data or non conforming code segment */
2719 if (dpl < cpl) {
2720 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2721 }
2722 }
2723}
2724
2725/* protected mode iret */
2726static inline void helper_ret_protected(int shift, int is_iret, int addend)
2727{
2728 uint32_t new_cs, new_eflags, new_ss;
2729 uint32_t new_es, new_ds, new_fs, new_gs;
2730 uint32_t e1, e2, ss_e1, ss_e2;
2731 int cpl, dpl, rpl, eflags_mask, iopl;
2732 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2733
2734#ifdef TARGET_X86_64
2735 if (shift == 2)
2736 sp_mask = -1;
2737 else
2738#endif
2739 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2740 sp = ESP;
2741 ssp = env->segs[R_SS].base;
2742 new_eflags = 0; /* avoid warning */
2743#ifdef TARGET_X86_64
2744 if (shift == 2) {
2745 POPQ(sp, new_eip);
2746 POPQ(sp, new_cs);
2747 new_cs &= 0xffff;
2748 if (is_iret) {
2749 POPQ(sp, new_eflags);
2750 }
2751 } else
2752#endif
2753 if (shift == 1) {
2754 /* 32 bits */
2755 POPL(ssp, sp, sp_mask, new_eip);
2756 POPL(ssp, sp, sp_mask, new_cs);
2757 new_cs &= 0xffff;
2758 if (is_iret) {
2759 POPL(ssp, sp, sp_mask, new_eflags);
2760 if (new_eflags & VM_MASK)
2761 goto return_to_vm86;
2762 }
2763 } else {
2764 /* 16 bits */
2765 POPW(ssp, sp, sp_mask, new_eip);
2766 POPW(ssp, sp, sp_mask, new_cs);
2767 if (is_iret)
2768 POPW(ssp, sp, sp_mask, new_eflags);
2769 }
d12d51d5
AL
2770 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2771 new_cs, new_eip, shift, addend);
2772 LOG_PCALL_STATE(env);
eaa728ee
FB
2773 if ((new_cs & 0xfffc) == 0)
2774 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2775 if (load_segment(&e1, &e2, new_cs) != 0)
2776 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2777 if (!(e2 & DESC_S_MASK) ||
2778 !(e2 & DESC_CS_MASK))
2779 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2780 cpl = env->hflags & HF_CPL_MASK;
2781 rpl = new_cs & 3;
2782 if (rpl < cpl)
2783 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2784 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2785 if (e2 & DESC_C_MASK) {
2786 if (dpl > rpl)
2787 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2788 } else {
2789 if (dpl != rpl)
2790 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2791 }
2792 if (!(e2 & DESC_P_MASK))
2793 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2794
2795 sp += addend;
2796 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2797 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
1235fc06 2798 /* return to same privilege level */
eaa728ee
FB
2799 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2800 get_seg_base(e1, e2),
2801 get_seg_limit(e1, e2),
2802 e2);
2803 } else {
2804 /* return to different privilege level */
2805#ifdef TARGET_X86_64
2806 if (shift == 2) {
2807 POPQ(sp, new_esp);
2808 POPQ(sp, new_ss);
2809 new_ss &= 0xffff;
2810 } else
2811#endif
2812 if (shift == 1) {
2813 /* 32 bits */
2814 POPL(ssp, sp, sp_mask, new_esp);
2815 POPL(ssp, sp, sp_mask, new_ss);
2816 new_ss &= 0xffff;
2817 } else {
2818 /* 16 bits */
2819 POPW(ssp, sp, sp_mask, new_esp);
2820 POPW(ssp, sp, sp_mask, new_ss);
2821 }
d12d51d5 2822 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
eaa728ee 2823 new_ss, new_esp);
eaa728ee
FB
2824 if ((new_ss & 0xfffc) == 0) {
2825#ifdef TARGET_X86_64
2826 /* NULL ss is allowed in long mode if cpl != 3*/
2827 /* XXX: test CS64 ? */
2828 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2829 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2830 0, 0xffffffff,
2831 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2832 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2833 DESC_W_MASK | DESC_A_MASK);
2834 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2835 } else
2836#endif
2837 {
2838 raise_exception_err(EXCP0D_GPF, 0);
2839 }
2840 } else {
2841 if ((new_ss & 3) != rpl)
2842 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2843 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2844 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2845 if (!(ss_e2 & DESC_S_MASK) ||
2846 (ss_e2 & DESC_CS_MASK) ||
2847 !(ss_e2 & DESC_W_MASK))
2848 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2849 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2850 if (dpl != rpl)
2851 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2852 if (!(ss_e2 & DESC_P_MASK))
2853 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2854 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2855 get_seg_base(ss_e1, ss_e2),
2856 get_seg_limit(ss_e1, ss_e2),
2857 ss_e2);
2858 }
2859
2860 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2861 get_seg_base(e1, e2),
2862 get_seg_limit(e1, e2),
2863 e2);
2864 cpu_x86_set_cpl(env, rpl);
2865 sp = new_esp;
2866#ifdef TARGET_X86_64
2867 if (env->hflags & HF_CS64_MASK)
2868 sp_mask = -1;
2869 else
2870#endif
2871 sp_mask = get_sp_mask(ss_e2);
2872
2873 /* validate data segments */
2874 validate_seg(R_ES, rpl);
2875 validate_seg(R_DS, rpl);
2876 validate_seg(R_FS, rpl);
2877 validate_seg(R_GS, rpl);
2878
2879 sp += addend;
2880 }
2881 SET_ESP(sp, sp_mask);
2882 env->eip = new_eip;
2883 if (is_iret) {
2884 /* NOTE: 'cpl' is the _old_ CPL */
2885 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2886 if (cpl == 0)
2887 eflags_mask |= IOPL_MASK;
2888 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2889 if (cpl <= iopl)
2890 eflags_mask |= IF_MASK;
2891 if (shift == 0)
2892 eflags_mask &= 0xffff;
2893 load_eflags(new_eflags, eflags_mask);
2894 }
2895 return;
2896
2897 return_to_vm86:
2898 POPL(ssp, sp, sp_mask, new_esp);
2899 POPL(ssp, sp, sp_mask, new_ss);
2900 POPL(ssp, sp, sp_mask, new_es);
2901 POPL(ssp, sp, sp_mask, new_ds);
2902 POPL(ssp, sp, sp_mask, new_fs);
2903 POPL(ssp, sp, sp_mask, new_gs);
2904
2905 /* modify processor state */
2906 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2907 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2908 load_seg_vm(R_CS, new_cs & 0xffff);
2909 cpu_x86_set_cpl(env, 3);
2910 load_seg_vm(R_SS, new_ss & 0xffff);
2911 load_seg_vm(R_ES, new_es & 0xffff);
2912 load_seg_vm(R_DS, new_ds & 0xffff);
2913 load_seg_vm(R_FS, new_fs & 0xffff);
2914 load_seg_vm(R_GS, new_gs & 0xffff);
2915
2916 env->eip = new_eip & 0xffff;
2917 ESP = new_esp;
2918}
2919
2920void helper_iret_protected(int shift, int next_eip)
2921{
2922 int tss_selector, type;
2923 uint32_t e1, e2;
2924
2925 /* specific case for TSS */
2926 if (env->eflags & NT_MASK) {
2927#ifdef TARGET_X86_64
2928 if (env->hflags & HF_LMA_MASK)
2929 raise_exception_err(EXCP0D_GPF, 0);
2930#endif
2931 tss_selector = lduw_kernel(env->tr.base + 0);
2932 if (tss_selector & 4)
2933 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2934 if (load_segment(&e1, &e2, tss_selector) != 0)
2935 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2936 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2937 /* NOTE: we check both segment and busy TSS */
2938 if (type != 3)
2939 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2940 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2941 } else {
2942 helper_ret_protected(shift, 1, 0);
2943 }
db620f46 2944 env->hflags2 &= ~HF2_NMI_MASK;
eaa728ee
FB
2945}
2946
/* protected mode far return (RETF), releasing 'addend' extra bytes. */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
2951
2952void helper_sysenter(void)
2953{
2954 if (env->sysenter_cs == 0) {
2955 raise_exception_err(EXCP0D_GPF, 0);
2956 }
2957 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2958 cpu_x86_set_cpl(env, 0);
2436b61a
AZ
2959
2960#ifdef TARGET_X86_64
2961 if (env->hflags & HF_LMA_MASK) {
2962 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2963 0, 0xffffffff,
2964 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2965 DESC_S_MASK |
2966 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2967 } else
2968#endif
2969 {
2970 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2971 0, 0xffffffff,
2972 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2973 DESC_S_MASK |
2974 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2975 }
eaa728ee
FB
2976 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2977 0, 0xffffffff,
2978 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2979 DESC_S_MASK |
2980 DESC_W_MASK | DESC_A_MASK);
2981 ESP = env->sysenter_esp;
2982 EIP = env->sysenter_eip;
2983}
2984
2436b61a 2985void helper_sysexit(int dflag)
eaa728ee
FB
2986{
2987 int cpl;
2988
2989 cpl = env->hflags & HF_CPL_MASK;
2990 if (env->sysenter_cs == 0 || cpl != 0) {
2991 raise_exception_err(EXCP0D_GPF, 0);
2992 }
2993 cpu_x86_set_cpl(env, 3);
2436b61a
AZ
2994#ifdef TARGET_X86_64
2995 if (dflag == 2) {
2996 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2997 0, 0xffffffff,
2998 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2999 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3000 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3001 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3002 0, 0xffffffff,
3003 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3004 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3005 DESC_W_MASK | DESC_A_MASK);
3006 } else
3007#endif
3008 {
3009 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3010 0, 0xffffffff,
3011 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3012 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3013 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3014 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3015 0, 0xffffffff,
3016 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3017 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3018 DESC_W_MASK | DESC_A_MASK);
3019 }
eaa728ee
FB
3020 ESP = ECX;
3021 EIP = EDX;
eaa728ee
FB
3022}
3023
872929aa
FB
3024#if defined(CONFIG_USER_ONLY)
3025target_ulong helper_read_crN(int reg)
eaa728ee 3026{
872929aa
FB
3027 return 0;
3028}
3029
3030void helper_write_crN(int reg, target_ulong t0)
3031{
3032}
01df040b
AL
3033
3034void helper_movl_drN_T0(int reg, target_ulong t0)
3035{
3036}
872929aa
FB
3037#else
3038target_ulong helper_read_crN(int reg)
3039{
3040 target_ulong val;
3041
3042 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3043 switch(reg) {
3044 default:
3045 val = env->cr[reg];
3046 break;
3047 case 8:
db620f46 3048 if (!(env->hflags2 & HF2_VINTR_MASK)) {
4a942cea 3049 val = cpu_get_apic_tpr(env->apic_state);
db620f46
FB
3050 } else {
3051 val = env->v_tpr;
3052 }
872929aa
FB
3053 break;
3054 }
3055 return val;
3056}
3057
3058void helper_write_crN(int reg, target_ulong t0)
3059{
3060 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
eaa728ee
FB
3061 switch(reg) {
3062 case 0:
3063 cpu_x86_update_cr0(env, t0);
3064 break;
3065 case 3:
3066 cpu_x86_update_cr3(env, t0);
3067 break;
3068 case 4:
3069 cpu_x86_update_cr4(env, t0);
3070 break;
3071 case 8:
db620f46 3072 if (!(env->hflags2 & HF2_VINTR_MASK)) {
4a942cea 3073 cpu_set_apic_tpr(env->apic_state, t0);
db620f46
FB
3074 }
3075 env->v_tpr = t0 & 0x0f;
eaa728ee
FB
3076 break;
3077 default:
3078 env->cr[reg] = t0;
3079 break;
3080 }
eaa728ee 3081}
01df040b
AL
3082
3083void helper_movl_drN_T0(int reg, target_ulong t0)
3084{
3085 int i;
3086
3087 if (reg < 4) {
3088 hw_breakpoint_remove(env, reg);
3089 env->dr[reg] = t0;
3090 hw_breakpoint_insert(env, reg);
3091 } else if (reg == 7) {
3092 for (i = 0; i < 4; i++)
3093 hw_breakpoint_remove(env, i);
3094 env->dr[7] = t0;
3095 for (i = 0; i < 4; i++)
3096 hw_breakpoint_insert(env, i);
3097 } else
3098 env->dr[reg] = t0;
3099}
872929aa 3100#endif
eaa728ee
FB
3101
3102void helper_lmsw(target_ulong t0)
3103{
3104 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3105 if already set to one. */
3106 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
872929aa 3107 helper_write_crN(0, t0);
eaa728ee
FB
3108}
3109
3110void helper_clts(void)
3111{
3112 env->cr[0] &= ~CR0_TS_MASK;
3113 env->hflags &= ~HF_TS_MASK;
3114}
3115
eaa728ee
FB
3116void helper_invlpg(target_ulong addr)
3117{
872929aa 3118 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
914178d3 3119 tlb_flush_page(env, addr);
eaa728ee
FB
3120}
3121
3122void helper_rdtsc(void)
3123{
3124 uint64_t val;
3125
3126 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3127 raise_exception(EXCP0D_GPF);
3128 }
872929aa
FB
3129 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3130
33c263df 3131 val = cpu_get_tsc(env) + env->tsc_offset;
eaa728ee
FB
3132 EAX = (uint32_t)(val);
3133 EDX = (uint32_t)(val >> 32);
3134}
3135
1b050077
AP
3136void helper_rdtscp(void)
3137{
3138 helper_rdtsc();
3139 ECX = (uint32_t)(env->tsc_aux);
3140}
3141
eaa728ee
FB
3142void helper_rdpmc(void)
3143{
3144 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3145 raise_exception(EXCP0D_GPF);
3146 }
eaa728ee
FB
3147 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3148
3149 /* currently unimplemented */
3150 raise_exception_err(EXCP06_ILLOP, 0);
3151}
3152
3153#if defined(CONFIG_USER_ONLY)
/* User-mode emulation stub: MSR writes are ignored. */
void helper_wrmsr(void)
{
}
3157
/* User-mode emulation stub: MSR reads do nothing. */
void helper_rdmsr(void)
{
}
3161#else
3162void helper_wrmsr(void)
3163{
3164 uint64_t val;
3165
872929aa
FB
3166 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3167
eaa728ee
FB
3168 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3169
3170 switch((uint32_t)ECX) {
3171 case MSR_IA32_SYSENTER_CS:
3172 env->sysenter_cs = val & 0xffff;
3173 break;
3174 case MSR_IA32_SYSENTER_ESP:
3175 env->sysenter_esp = val;
3176 break;
3177 case MSR_IA32_SYSENTER_EIP:
3178 env->sysenter_eip = val;
3179 break;
3180 case MSR_IA32_APICBASE:
4a942cea 3181 cpu_set_apic_base(env->apic_state, val);
eaa728ee
FB
3182 break;
3183 case MSR_EFER:
3184 {
3185 uint64_t update_mask;
3186 update_mask = 0;
3187 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3188 update_mask |= MSR_EFER_SCE;
3189 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3190 update_mask |= MSR_EFER_LME;
3191 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3192 update_mask |= MSR_EFER_FFXSR;
3193 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3194 update_mask |= MSR_EFER_NXE;
5efc27bb
FB
3195 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3196 update_mask |= MSR_EFER_SVME;
eef26553
AL
3197 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3198 update_mask |= MSR_EFER_FFXSR;
5efc27bb
FB
3199 cpu_load_efer(env, (env->efer & ~update_mask) |
3200 (val & update_mask));
eaa728ee
FB
3201 }
3202 break;
3203 case MSR_STAR:
3204 env->star = val;
3205 break;
3206 case MSR_PAT:
3207 env->pat = val;
3208 break;
3209 case MSR_VM_HSAVE_PA:
3210 env->vm_hsave = val;
3211 break;
3212#ifdef TARGET_X86_64
3213 case MSR_LSTAR:
3214 env->lstar = val;
3215 break;
3216 case MSR_CSTAR:
3217 env->cstar = val;
3218 break;
3219 case MSR_FMASK:
3220 env->fmask = val;
3221 break;
3222 case MSR_FSBASE:
3223 env->segs[R_FS].base = val;
3224 break;
3225 case MSR_GSBASE:
3226 env->segs[R_GS].base = val;
3227 break;
3228 case MSR_KERNELGSBASE:
3229 env->kernelgsbase = val;
3230 break;
3231#endif
165d9b82
AL
3232 case MSR_MTRRphysBase(0):
3233 case MSR_MTRRphysBase(1):
3234 case MSR_MTRRphysBase(2):
3235 case MSR_MTRRphysBase(3):
3236 case MSR_MTRRphysBase(4):
3237 case MSR_MTRRphysBase(5):
3238 case MSR_MTRRphysBase(6):
3239 case MSR_MTRRphysBase(7):
3240 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3241 break;
3242 case MSR_MTRRphysMask(0):
3243 case MSR_MTRRphysMask(1):
3244 case MSR_MTRRphysMask(2):
3245 case MSR_MTRRphysMask(3):
3246 case MSR_MTRRphysMask(4):
3247 case MSR_MTRRphysMask(5):
3248 case MSR_MTRRphysMask(6):
3249 case MSR_MTRRphysMask(7):
3250 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3251 break;
3252 case MSR_MTRRfix64K_00000:
3253 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3254 break;
3255 case MSR_MTRRfix16K_80000:
3256 case MSR_MTRRfix16K_A0000:
3257 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3258 break;
3259 case MSR_MTRRfix4K_C0000:
3260 case MSR_MTRRfix4K_C8000:
3261 case MSR_MTRRfix4K_D0000:
3262 case MSR_MTRRfix4K_D8000:
3263 case MSR_MTRRfix4K_E0000:
3264 case MSR_MTRRfix4K_E8000:
3265 case MSR_MTRRfix4K_F0000:
3266 case MSR_MTRRfix4K_F8000:
3267 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3268 break;
3269 case MSR_MTRRdefType:
3270 env->mtrr_deftype = val;
3271 break;
79c4f6b0
HY
3272 case MSR_MCG_STATUS:
3273 env->mcg_status = val;
3274 break;
3275 case MSR_MCG_CTL:
3276 if ((env->mcg_cap & MCG_CTL_P)
3277 && (val == 0 || val == ~(uint64_t)0))
3278 env->mcg_ctl = val;
3279 break;
1b050077
AP
3280 case MSR_TSC_AUX:
3281 env->tsc_aux = val;
3282 break;
21e87c46
AK
3283 case MSR_IA32_MISC_ENABLE:
3284 env->msr_ia32_misc_enable = val;
3285 break;
eaa728ee 3286 default:
79c4f6b0
HY
3287 if ((uint32_t)ECX >= MSR_MC0_CTL
3288 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3289 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3290 if ((offset & 0x3) != 0
3291 || (val == 0 || val == ~(uint64_t)0))
3292 env->mce_banks[offset] = val;
3293 break;
3294 }
eaa728ee
FB
3295 /* XXX: exception ? */
3296 break;
3297 }
3298}
3299
3300void helper_rdmsr(void)
3301{
3302 uint64_t val;
872929aa
FB
3303
3304 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3305
eaa728ee
FB
3306 switch((uint32_t)ECX) {
3307 case MSR_IA32_SYSENTER_CS:
3308 val = env->sysenter_cs;
3309 break;
3310 case MSR_IA32_SYSENTER_ESP:
3311 val = env->sysenter_esp;
3312 break;
3313 case MSR_IA32_SYSENTER_EIP:
3314 val = env->sysenter_eip;
3315 break;
3316 case MSR_IA32_APICBASE:
4a942cea 3317 val = cpu_get_apic_base(env->apic_state);
eaa728ee
FB
3318 break;
3319 case MSR_EFER:
3320 val = env->efer;
3321 break;
3322 case MSR_STAR:
3323 val = env->star;
3324 break;
3325 case MSR_PAT:
3326 val = env->pat;
3327 break;
3328 case MSR_VM_HSAVE_PA:
3329 val = env->vm_hsave;
3330 break;
d5e49a81
AZ
3331 case MSR_IA32_PERF_STATUS:
3332 /* tsc_increment_by_tick */
3333 val = 1000ULL;
3334 /* CPU multiplier */
3335 val |= (((uint64_t)4ULL) << 40);
3336 break;
eaa728ee
FB
3337#ifdef TARGET_X86_64
3338 case MSR_LSTAR:
3339 val = env->lstar;
3340 break;
3341 case MSR_CSTAR:
3342 val = env->cstar;
3343 break;
3344 case MSR_FMASK:
3345 val = env->fmask;
3346 break;
3347 case MSR_FSBASE:
3348 val = env->segs[R_FS].base;
3349 break;
3350 case MSR_GSBASE:
3351 val = env->segs[R_GS].base;
3352 break;
3353 case MSR_KERNELGSBASE:
3354 val = env->kernelgsbase;
3355 break;
1b050077
AP
3356 case MSR_TSC_AUX:
3357 val = env->tsc_aux;
3358 break;
eaa728ee 3359#endif
165d9b82
AL
3360 case MSR_MTRRphysBase(0):
3361 case MSR_MTRRphysBase(1):
3362 case MSR_MTRRphysBase(2):
3363 case MSR_MTRRphysBase(3):
3364 case MSR_MTRRphysBase(4):
3365 case MSR_MTRRphysBase(5):
3366 case MSR_MTRRphysBase(6):
3367 case MSR_MTRRphysBase(7):
3368 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3369 break;
3370 case MSR_MTRRphysMask(0):
3371 case MSR_MTRRphysMask(1):
3372 case MSR_MTRRphysMask(2):
3373 case MSR_MTRRphysMask(3):
3374 case MSR_MTRRphysMask(4):
3375 case MSR_MTRRphysMask(5):
3376 case MSR_MTRRphysMask(6):
3377 case MSR_MTRRphysMask(7):
3378 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3379 break;
3380 case MSR_MTRRfix64K_00000:
3381 val = env->mtrr_fixed[0];
3382 break;
3383 case MSR_MTRRfix16K_80000:
3384 case MSR_MTRRfix16K_A0000:
3385 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3386 break;
3387 case MSR_MTRRfix4K_C0000:
3388 case MSR_MTRRfix4K_C8000:
3389 case MSR_MTRRfix4K_D0000:
3390 case MSR_MTRRfix4K_D8000:
3391 case MSR_MTRRfix4K_E0000:
3392 case MSR_MTRRfix4K_E8000:
3393 case MSR_MTRRfix4K_F0000:
3394 case MSR_MTRRfix4K_F8000:
3395 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3396 break;
3397 case MSR_MTRRdefType:
3398 val = env->mtrr_deftype;
3399 break;
dd5e3b17
AL
3400 case MSR_MTRRcap:
3401 if (env->cpuid_features & CPUID_MTRR)
3402 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3403 else
3404 /* XXX: exception ? */
3405 val = 0;
3406 break;
79c4f6b0
HY
3407 case MSR_MCG_CAP:
3408 val = env->mcg_cap;
3409 break;
3410 case MSR_MCG_CTL:
3411 if (env->mcg_cap & MCG_CTL_P)
3412 val = env->mcg_ctl;
3413 else
3414 val = 0;
3415 break;
3416 case MSR_MCG_STATUS:
3417 val = env->mcg_status;
3418 break;
21e87c46
AK
3419 case MSR_IA32_MISC_ENABLE:
3420 val = env->msr_ia32_misc_enable;
3421 break;
eaa728ee 3422 default:
79c4f6b0
HY
3423 if ((uint32_t)ECX >= MSR_MC0_CTL
3424 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3425 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3426 val = env->mce_banks[offset];
3427 break;
3428 }
eaa728ee
FB
3429 /* XXX: exception ? */
3430 val = 0;
3431 break;
3432 }
3433 EAX = (uint32_t)(val);
3434 EDX = (uint32_t)(val >> 32);
3435}
3436#endif
3437
/* LSL: load the segment limit of the selector in selector1.  On success
   the limit is returned and ZF is set; on any failure ZF is cleared and
   0 is returned (LSL never faults on bad selectors).  Note the `fail:`
   label lives inside the system-segment branch but is reached by gotos
   from every check above it. */
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);   /* materialize flags first */
    if ((selector & 0xfffc) == 0)
        goto fail;                           /* null selector */
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;                           /* outside descriptor table */
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming code segment: always accessible */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;                   /* privilege check */
        }
    } else {
        /* system segment: only TSS/LDT types have a meaningful limit */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:   /* 16-bit available TSS */
        case 2:   /* LDT */
        case 3:   /* 16-bit busy TSS */
        case 9:   /* 32-bit available TSS */
        case 11:  /* 32-bit busy TSS */
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
3482
/* LAR: load the access-rights bytes of the selector in selector1.  On
   success the masked descriptor word (e2 & 0x00f0ff00) is returned and
   ZF is set; on failure ZF is cleared and 0 is returned.  Accepts more
   system-segment types than LSL (gates included). */
target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);   /* materialize flags first */
    if ((selector & 0xfffc) == 0)
        goto fail;                           /* null selector */
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming code segment: always accessible */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:   /* 16-bit available TSS */
        case 2:   /* LDT */
        case 3:   /* 16-bit busy TSS */
        case 4:   /* 16-bit call gate */
        case 5:   /* task gate */
        case 9:   /* 32-bit available TSS */
        case 11:  /* 32-bit busy TSS */
        case 12:  /* 32-bit call gate */
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
3528
/* VERR: set ZF if the segment described by selector1 is readable at the
   current privilege level, clear ZF otherwise.  Never faults. */
void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);   /* materialize flags first */
    if ((selector & 0xfffc) == 0)
        goto fail;                           /* null selector */
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;                           /* system segments not readable */
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            goto fail;                       /* execute-only code segment */
        if (!(e2 & DESC_C_MASK)) {
            /* non-conforming code needs the privilege check */
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
3561
/* VERW: set ZF if the segment described by selector1 is writable at the
   current privilege level, clear ZF otherwise.  Code segments are never
   writable.  Never faults. */
void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);   /* materialize flags first */
    if ((selector & 0xfffc) == 0)
        goto fail;                           /* null selector */
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;                           /* system segments not writable */
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;                           /* code segments never writable */
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
3591
3592/* x87 FPU helpers */
3593
/* Convert an 80-bit extended float to a host double, punning through a
   union to avoid strict-aliasing issues.  Precision beyond 53 mantissa
   bits is lost; used only by the transcendental helpers below. */
static inline double floatx80_to_double(floatx80 a)
{
    union {
        float64 f64;
        double d;
    } u;

    u.f64 = floatx80_to_float64(a, &env->fp_status);
    return u.d;
}
3604
/* Convert a host double back to an 80-bit extended float (inverse of
   floatx80_to_double), punning through a union. */
static inline floatx80 double_to_floatx80(double a)
{
    union {
        float64 f64;
        double d;
    } u;

    u.d = a;
    return float64_to_floatx80(u.f64, &env->fp_status);
}
3615
eaa728ee
FB
/* Set FPU status-word exception bits; if any set exception is unmasked
   in the control word, also raise the summary and busy flags. */
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}
3622
/* a/b with the x87 divide-by-zero exception flagged when b == 0; the
   division still proceeds so softfloat produces the IEEE result. */
static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
{
    if (floatx80_is_zero(b)) {
        fpu_set_exception(FPUS_ZE);
    }
    return floatx80_div(a, b, &env->fp_status);
}
3630
/* Deliver a pending FPU exception: #MF when CR0.NE is set, otherwise
   (system emulation only) the legacy FERR external interrupt path. */
static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}
3642
/* Load a raw 32-bit single-precision pattern into the FT0 scratch reg. */
void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx80(u.f, &env->fp_status);
}
3652
/* Load a raw 64-bit double-precision pattern into the FT0 scratch reg. */
void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx80(u.f, &env->fp_status);
}
3662
/* Load a signed 32-bit integer into the FT0 scratch register. */
void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx80(val, &env->fp_status);
}
3667
/* FLD m32: push a 32-bit single onto the FPU stack. */
void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;   /* stack grows downward mod 8 */
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}
3681
/* FLD m64: push a 64-bit double onto the FPU stack. */
void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;   /* stack grows downward mod 8 */
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}
3695
/* FILD m32: push a signed 32-bit integer onto the FPU stack. */
void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}
3704
/* FILD m64: push a signed 64-bit integer onto the FPU stack. */
void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}
3713
/* FST m32: return ST0 as a raw 32-bit single-precision bit pattern. */
uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx80_to_float32(ST0, &env->fp_status);
    return u.i;
}
3723
/* FST m64: return ST0 as a raw 64-bit double-precision bit pattern. */
uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx80_to_float64(ST0, &env->fp_status);
    return u.i;
}
3733
/* FIST m16: convert ST0 to a 16-bit integer (current rounding mode);
   out-of-range results become the x87 integer indefinite, -32768. */
int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}
3742
/* FIST m32: convert ST0 to a 32-bit integer, current rounding mode. */
int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32(ST0, &env->fp_status);
    return val;
}
3749
/* FISTP m64: convert ST0 to a 64-bit integer, current rounding mode. */
int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx80_to_int64(ST0, &env->fp_status);
    return val;
}
3756
/* FISTTP m16: truncating convert; out-of-range -> -32768 indefinite. */
int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}
3765
/* FISTTP m32: truncating (round-to-zero) conversion of ST0. */
int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}
3772
/* FISTTP m64: truncating (round-to-zero) conversion of ST0. */
int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}
3779
/* FLD m80: push an 80-bit extended real loaded from guest memory. */
void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}
3788
/* FSTP m80: store ST0 as an 80-bit extended real to guest memory. */
void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}
3793
/* Expose the internal fpush() stack-push primitive to TCG. */
void helper_fpush(void)
{
    fpush();
}
3798
/* Expose the internal fpop() stack-pop primitive to TCG. */
void helper_fpop(void)
{
    fpop();
}
3803
/* FDECSTP: rotate the stack top down; clears C0-C3 (0x4700). */
void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}
3809
/* FINCSTP: rotate the stack top up; clears C0-C3 (0x4700). */
void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}
3815
3816/* FPU move */
3817
/* FFREE ST(i): mark the register's tag as empty without touching data. */
void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}
3822
/* Copy scratch register FT0 into ST0. */
void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}
3827
/* Copy ST(i) into the scratch register FT0. */
void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}
3832
/* FLD ST(i): copy ST(i) into ST0 (no stack push here). */
void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}
3837
/* FST ST(i): copy ST0 into ST(i). */
void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}
3842
3843void helper_fxchg_ST0_STN(int st_index)
3844{
c31da136 3845 floatx80 tmp;
eaa728ee
FB
3846 tmp = ST(st_index);
3847 ST(st_index) = ST0;
3848 ST0 = tmp;
3849}
3850
3851/* FPU operations */
3852
3853static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3854
/* FCOM: compare ST0 with FT0 (signalling) and set C3,C2,C0 from the
   result via fcom_ccval, indexed by {less,equal,greater,unordered}+1. */
void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx80_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}
3862
/* FUCOM: like FCOM but quiet (no invalid-op on quiet NaN operands). */
void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}
3870
3871static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3872
/* FCOMI: compare ST0 with FT0 (signalling) and set ZF/PF/CF in EFLAGS
   via fcomi_ccval instead of the FPU status word. */
void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx80_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);   /* materialize lazy flags */
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}
3883
/* FUCOMI: like FCOMI but quiet (no invalid-op on quiet NaN operands). */
void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);   /* materialize lazy flags */
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}
3894
/* FADD: ST0 <- ST0 + FT0 */
void helper_fadd_ST0_FT0(void)
{
    ST0 = floatx80_add(ST0, FT0, &env->fp_status);
}
3899
/* FMUL: ST0 <- ST0 * FT0 */
void helper_fmul_ST0_FT0(void)
{
    ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
}
3904
/* FSUB: ST0 <- ST0 - FT0 */
void helper_fsub_ST0_FT0(void)
{
    ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
}
3909
/* FSUBR: ST0 <- FT0 - ST0 (reversed operands) */
void helper_fsubr_ST0_FT0(void)
{
    ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
}
3914
/* FDIV: ST0 <- ST0 / FT0 (divide-by-zero flagged by helper_fdiv) */
void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}
3919
/* FDIVR: ST0 <- FT0 / ST0 (reversed operands) */
void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}
3924
3925/* fp operations between STN and ST0 */
3926
/* FADD ST(i), ST0: ST(i) <- ST(i) + ST0 */
void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
}
3931
/* FMUL ST(i), ST0: ST(i) <- ST(i) * ST0 */
void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
}
3936
/* FSUB ST(i), ST0: ST(i) <- ST(i) - ST0 */
void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
}
3941
/* FSUBR ST(i), ST0: ST(i) <- ST0 - ST(i) (reversed operands) */
void helper_fsubr_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
}
3946
/* FDIV ST(i), ST0: ST(i) <- ST(i) / ST0 */
void helper_fdiv_STN_ST0(int st_index)
{
    floatx80 *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}
3953
/* FDIVR ST(i), ST0: ST(i) <- ST0 / ST(i) (reversed operands) */
void helper_fdivr_STN_ST0(int st_index)
{
    floatx80 *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}
3960
3961/* misc FPU operations */
/* FCHS: flip the sign bit of ST0. */
void helper_fchs_ST0(void)
{
    ST0 = floatx80_chs(ST0);
}
3966
/* FABS: clear the sign bit of ST0. */
void helper_fabs_ST0(void)
{
    ST0 = floatx80_abs(ST0);
}
3971
/* FLD1: load +1.0 into ST0 (caller has already pushed). */
void helper_fld1_ST0(void)
{
    ST0 = floatx80_one;
}
3976
/* FLDL2T: load log2(10) into ST0. */
void helper_fldl2t_ST0(void)
{
    ST0 = floatx80_l2t;
}
3981
/* FLDL2E: load log2(e) into ST0. */
void helper_fldl2e_ST0(void)
{
    ST0 = floatx80_l2e;
}
3986
/* FLDPI: load pi into ST0. */
void helper_fldpi_ST0(void)
{
    ST0 = floatx80_pi;
}
3991
/* FLDLG2: load log10(2) into ST0. */
void helper_fldlg2_ST0(void)
{
    ST0 = floatx80_lg2;
}
3996
/* FLDLN2: load ln(2) into ST0. */
void helper_fldln2_ST0(void)
{
    ST0 = floatx80_ln2;
}
4001
/* FLDZ: load +0.0 into ST0. */
void helper_fldz_ST0(void)
{
    ST0 = floatx80_zero;
}
4006
/* Load +0.0 into the FT0 scratch register (used for compares). */
void helper_fldz_FT0(void)
{
    FT0 = floatx80_zero;
}
4011
/* FNSTSW: status word with the current TOP field merged into bits 11-13. */
uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}
4016
/* FNSTCW: return the x87 control word. */
uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}
4021
4022static void update_fp_status(void)
4023{
4024 int rnd_type;
4025
4026 /* set rounding mode */
4027 switch(env->fpuc & RC_MASK) {
4028 default:
4029 case RC_NEAR:
4030 rnd_type = float_round_nearest_even;
4031 break;
4032 case RC_DOWN:
4033 rnd_type = float_round_down;
4034 break;
4035 case RC_UP:
4036 rnd_type = float_round_up;
4037 break;
4038 case RC_CHOP:
4039 rnd_type = float_round_to_zero;
4040 break;
4041 }
4042 set_float_rounding_mode(rnd_type, &env->fp_status);
eaa728ee
FB
4043 switch((env->fpuc >> 8) & 3) {
4044 case 0:
4045 rnd_type = 32;
4046 break;
4047 case 2:
4048 rnd_type = 64;
4049 break;
4050 case 3:
4051 default:
4052 rnd_type = 80;
4053 break;
4054 }
4055 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
eaa728ee
FB
4056}
4057
/* FLDCW: load the control word and re-derive softfloat settings. */
void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}
4063
/* FNCLEX: clear the exception, summary and busy bits of the status word. */
void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}
4068
/* FWAIT: deliver any pending FPU exception (summary bit set). */
void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}
4074
4075void helper_fninit(void)
4076{
4077 env->fpus = 0;
4078 env->fpstt = 0;
4079 env->fpuc = 0x37f;
4080 env->fptags[0] = 1;
4081 env->fptags[1] = 1;
4082 env->fptags[2] = 1;
4083 env->fptags[3] = 1;
4084 env->fptags[4] = 1;
4085 env->fptags[5] = 1;
4086 env->fptags[6] = 1;
4087 env->fptags[7] = 1;
4088}
4089
4090/* BCD ops */
4091
4092void helper_fbld_ST0(target_ulong ptr)
4093{
c31da136 4094 floatx80 tmp;
eaa728ee
FB
4095 uint64_t val;
4096 unsigned int v;
4097 int i;
4098
4099 val = 0;
4100 for(i = 8; i >= 0; i--) {
4101 v = ldub(ptr + i);
4102 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4103 }
c31da136 4104 tmp = int64_to_floatx80(val, &env->fp_status);
788e7336 4105 if (ldub(ptr + 9) & 0x80) {
c31da136 4106 floatx80_chs(tmp);
788e7336 4107 }
eaa728ee
FB
4108 fpush();
4109 ST0 = tmp;
4110}
4111
/* FBSTP: store ST0 as an 18-digit packed-BCD integer: 9 payload bytes
   little-endian (two digits per byte), sign byte at offset 9. */
void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx80_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);   /* sign byte */
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    /* emit two decimal digits per byte, least significant first */
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    /* zero-fill the remaining payload bytes */
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
4139
/* F2XM1: ST0 <- 2^ST0 - 1, computed via host double precision (loses
   extended precision, like the other transcendental helpers here). */
void helper_f2xm1(void)
{
    double val = floatx80_to_double(ST0);
    val = pow(2.0, val) - 1.0;
    ST0 = double_to_floatx80(val);
}
4146
/* FYL2X: ST1 <- ST1 * log2(ST0), then pop.  For ST0 <= 0 the status
   word is cleared and C2 set instead (no pop). */
void helper_fyl2x(void)
{
    double fptemp = floatx80_to_double(ST0);

    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0); /* log2(ST) */
        fptemp *= floatx80_to_double(ST1);
        ST1 = double_to_floatx80(fptemp);
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;   /* C2: invalid operand range */
    }
}
4161
/* FPTAN: ST0 <- tan(ST0), then push 1.0.  Arguments outside +/-MAXTAN
   only set C2 (argument out of range), leaving the stack untouched. */
void helper_fptan(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;   /* C2: argument out of range */
    } else {
        fptemp = tan(fptemp);
        ST0 = double_to_floatx80(fptemp);
        fpush();
        ST0 = floatx80_one;
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}
4177
/* FPATAN: ST1 <- atan2(ST1, ST0), then pop. */
void helper_fpatan(void)
{
    double fptemp, fpsrcop;

    fpsrcop = floatx80_to_double(ST1);
    fptemp = floatx80_to_double(ST0);
    ST1 = double_to_floatx80(atan2(fpsrcop, fptemp));
    fpop();
}
4187
/* FXTRACT: split ST0 into exponent and significand; ST0 becomes the
   unbiased exponent and the significand is pushed on top.  A zero input
   produces -inf (with divide-by-zero raised) and the zero on top. */
void helper_fxtract(void)
{
    CPU_LDoubleU temp;

    temp.d = ST0;

    if (floatx80_is_zero(ST0)) {
        /* Easy way to generate -inf and raising division by 0 exception */
        ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero, &env->fp_status);
        fpush();
        ST0 = temp.d;
    } else {
        int expdif;

        expdif = EXPD(temp) - EXPBIAS;
        /*DP exponent bias*/
        ST0 = int32_to_floatx80(expdif, &env->fp_status);
        fpush();
        BIASEXPONENT(temp);   /* force exponent field to the bias */
        ST0 = temp.d;
    }
}
4210
/* FPREM1: IEEE partial remainder ST0 <- ST0 REM ST1, quotient rounded
   to nearest.  C0/C3/C1 receive the low quotient bits on completion;
   C2 set means the reduction is incomplete (exponent gap >= 53) and
   software should loop.  Computed in host double precision. */
void helper_fprem1(void)
{
    double st0, st1, dblq, fpsrcop, fptemp;
    CPU_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    st0 = floatx80_to_double(ST0);
    st1 = floatx80_to_double(ST1);

    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
        ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = st0;
    fptemp = st1;
    fpsrcop1.d = ST0;
    fptemp1.d = ST1;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        st0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
    } else {
        /* exponent gap too large: do a partial reduction and set C2 */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (st0 / st1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        st0 -= (st1 * fpsrcop * fptemp);
    }
    ST0 = double_to_floatx80(st0);
}
4268
/* FPREM: x87 partial remainder ST0 <- ST0 REM ST1, quotient truncated
   towards zero (unlike FPREM1's round-to-nearest).  C0/C3/C1 receive
   the low quotient bits; C2 set means reduction incomplete.  Computed
   in host double precision. */
void helper_fprem(void)
{
    double st0, st1, dblq, fpsrcop, fptemp;
    CPU_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    st0 = floatx80_to_double(ST0);
    st1 = floatx80_to_double(ST1);

    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
        ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = st0;
    fptemp = st1;
    fpsrcop1.d = ST0;
    fptemp1.d = ST1;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        st0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
    } else {
        /* exponent gap too large: partial reduction, C2 set */
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (st0 / st1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        st0 -= (st1 * fpsrcop * fptemp);
    }
    ST0 = double_to_floatx80(st0);
}
4327
/* FYL2XP1: ST1 <- ST1 * log2(ST0 + 1), then pop.  For ST0 + 1 <= 0 the
   status word is cleared and C2 set instead (no pop). */
void helper_fyl2xp1(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST+1.0) */
        fptemp *= floatx80_to_double(ST1);
        ST1 = double_to_floatx80(fptemp);
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;   /* C2: invalid operand range */
    }
}
4342
/* FSQRT: ST0 <- sqrt(ST0).  A negative input flags C2 first; the
   softfloat sqrt then yields the IEEE NaN result. */
void helper_fsqrt(void)
{
    if (floatx80_is_neg(ST0)) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = floatx80_sqrt(ST0, &env->fp_status);
}
4351
/* FSINCOS: replace ST0 with sin(ST0) and push cos(ST0).  Arguments
   outside +/-MAXTAN only set C2 (out of range), stack untouched. */
void helper_fsincos(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;   /* C2: argument out of range */
    } else {
        ST0 = double_to_floatx80(sin(fptemp));
        fpush();
        ST0 = double_to_floatx80(cos(fptemp));
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}
4366
/* FRNDINT: round ST0 to an integer using the current rounding mode. */
void helper_frndint(void)
{
    ST0 = floatx80_round_to_int(ST0, &env->fp_status);
}
4371
/* FSCALE: ST0 <- ST0 * 2^trunc(ST1); a NaN in ST1 propagates to ST0. */
void helper_fscale(void)
{
    if (floatx80_is_any_nan(ST1)) {
        ST0 = ST1;
    } else {
        int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
        ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
    }
}
4381
/* FSIN: ST0 <- sin(ST0); arguments outside +/-MAXTAN only set C2. */
void helper_fsin(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;   /* C2: argument out of range */
    } else {
        ST0 = double_to_floatx80(sin(fptemp));
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}
4394
/* FCOS: ST0 <- cos(ST0); arguments outside +/-MAXTAN only set C2. */
void helper_fcos(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;   /* C2: argument out of range */
    } else {
        ST0 = double_to_floatx80(cos(fptemp));
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg5 < 2**63 only */
    }
}
4407
/* FXAM: classify ST0 into C3/C2/C0 (zero, NaN, infinity, denormal,
   normal) and mirror the sign in C1.  Empty-register tagging is not
   checked (see XXX below). */
void helper_fxam_ST0(void)
{
    CPU_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        /* max exponent: infinity if the mantissa is exactly the
           integer bit, otherwise NaN */
        if (MANTD(temp) == 0x8000000000000000ULL)
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;   /* normal finite number */
    }
}
4435
4436void helper_fstenv(target_ulong ptr, int data32)
4437{
4438 int fpus, fptag, exp, i;
4439 uint64_t mant;
c31da136 4440 CPU_LDoubleU tmp;
eaa728ee
FB
4441
4442 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4443 fptag = 0;
4444 for (i=7; i>=0; i--) {
4445 fptag <<= 2;
4446 if (env->fptags[i]) {
4447 fptag |= 3;
4448 } else {
4449 tmp.d = env->fpregs[i].d;
4450 exp = EXPD(tmp);
4451 mant = MANTD(tmp);
4452 if (exp == 0 && mant == 0) {
4453 /* zero */
4454 fptag |= 1;
4455 } else if (exp == 0 || exp == MAXEXPD
eaa728ee 4456 || (mant & (1LL << 63)) == 0
eaa728ee
FB
4457 ) {
4458 /* NaNs, infinity, denormal */
4459 fptag |= 2;
4460 }
4461 }
4462 }
4463 if (data32) {
4464 /* 32 bit */
4465 stl(ptr, env->fpuc);
4466 stl(ptr + 4, fpus);
4467 stl(ptr + 8, fptag);
4468 stl(ptr + 12, 0); /* fpip */
4469 stl(ptr + 16, 0); /* fpcs */
4470 stl(ptr + 20, 0); /* fpoo */
4471 stl(ptr + 24, 0); /* fpos */
4472 } else {
4473 /* 16 bit */
4474 stw(ptr, env->fpuc);
4475 stw(ptr + 2, fpus);
4476 stw(ptr + 4, fptag);
4477 stw(ptr + 6, 0);
4478 stw(ptr + 8, 0);
4479 stw(ptr + 10, 0);
4480 stw(ptr + 12, 0);
4481 }
4482}
4483
4484void helper_fldenv(target_ulong ptr, int data32)
4485{
4486 int i, fpus, fptag;
4487
4488 if (data32) {
4489 env->fpuc = lduw(ptr);
4490 fpus = lduw(ptr + 4);
4491 fptag = lduw(ptr + 8);
4492 }
4493 else {
4494 env->fpuc = lduw(ptr);
4495 fpus = lduw(ptr + 2);
4496 fptag = lduw(ptr + 4);
4497 }
4498 env->fpstt = (fpus >> 11) & 7;
4499 env->fpus = fpus & ~0x3800;
4500 for(i = 0;i < 8; i++) {
4501 env->fptags[i] = ((fptag & 3) == 3);
4502 fptag >>= 2;
4503 }
4504}
4505
4506void helper_fsave(target_ulong ptr, int data32)
4507{
c31da136 4508 floatx80 tmp;
eaa728ee
FB
4509 int i;
4510
4511 helper_fstenv(ptr, data32);
4512
4513 ptr += (14 << data32);
4514 for(i = 0;i < 8; i++) {
4515 tmp = ST(i);
4516 helper_fstt(tmp, ptr);
4517 ptr += 10;
4518 }
4519
4520 /* fninit */
4521 env->fpus = 0;
4522 env->fpstt = 0;
4523 env->fpuc = 0x37f;
4524 env->fptags[0] = 1;
4525 env->fptags[1] = 1;
4526 env->fptags[2] = 1;
4527 env->fptags[3] = 1;
4528 env->fptags[4] = 1;
4529 env->fptags[5] = 1;
4530 env->fptags[6] = 1;
4531 env->fptags[7] = 1;
4532}
4533
4534void helper_frstor(target_ulong ptr, int data32)
4535{
c31da136 4536 floatx80 tmp;
eaa728ee
FB
4537 int i;
4538
4539 helper_fldenv(ptr, data32);
4540 ptr += (14 << data32);
4541
4542 for(i = 0;i < 8; i++) {
4543 tmp = helper_fldt(ptr);
4544 ST(i) = tmp;
4545 ptr += 10;
4546 }
4547}
4548
3e457172
BS
4549
4550#if defined(CONFIG_USER_ONLY)
4551void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
4552{
4553 CPUX86State *saved_env;
4554
4555 saved_env = env;
4556 env = s;
4557 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
4558 selector &= 0xffff;
4559 cpu_x86_load_seg_cache(env, seg_reg, selector,
4560 (selector << 4), 0xffff, 0);
4561 } else {
4562 helper_load_seg(seg_reg, selector);
4563 }
4564 env = saved_env;
4565}
4566
4567void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
4568{
4569 CPUX86State *saved_env;
4570
4571 saved_env = env;
4572 env = s;
4573
4574 helper_fsave(ptr, data32);
4575
4576 env = saved_env;
4577}
4578
4579void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
4580{
4581 CPUX86State *saved_env;
4582
4583 saved_env = env;
4584 env = s;
4585
4586 helper_frstor(ptr, data32);
4587
4588 env = saved_env;
4589}
4590#endif
4591
eaa728ee
FB
4592void helper_fxsave(target_ulong ptr, int data64)
4593{
4594 int fpus, fptag, i, nb_xmm_regs;
c31da136 4595 floatx80 tmp;
eaa728ee
FB
4596 target_ulong addr;
4597
09d85fb8
KW
4598 /* The operand must be 16 byte aligned */
4599 if (ptr & 0xf) {
4600 raise_exception(EXCP0D_GPF);
4601 }
4602
eaa728ee
FB
4603 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4604 fptag = 0;
4605 for(i = 0; i < 8; i++) {
4606 fptag |= (env->fptags[i] << i);
4607 }
4608 stw(ptr, env->fpuc);
4609 stw(ptr + 2, fpus);
4610 stw(ptr + 4, fptag ^ 0xff);
4611#ifdef TARGET_X86_64
4612 if (data64) {
4613 stq(ptr + 0x08, 0); /* rip */
4614 stq(ptr + 0x10, 0); /* rdp */
4615 } else
4616#endif
4617 {
4618 stl(ptr + 0x08, 0); /* eip */
4619 stl(ptr + 0x0c, 0); /* sel */
4620 stl(ptr + 0x10, 0); /* dp */
4621 stl(ptr + 0x14, 0); /* sel */
4622 }
4623
4624 addr = ptr + 0x20;
4625 for(i = 0;i < 8; i++) {
4626 tmp = ST(i);
4627 helper_fstt(tmp, addr);
4628 addr += 16;
4629 }
4630
4631 if (env->cr[4] & CR4_OSFXSR_MASK) {
4632 /* XXX: finish it */
4633 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4634 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4635 if (env->hflags & HF_CS64_MASK)
4636 nb_xmm_regs = 16;
4637 else
4638 nb_xmm_regs = 8;
4639 addr = ptr + 0xa0;
eef26553
AL
4640 /* Fast FXSAVE leaves out the XMM registers */
4641 if (!(env->efer & MSR_EFER_FFXSR)
4642 || (env->hflags & HF_CPL_MASK)
4643 || !(env->hflags & HF_LMA_MASK)) {
4644 for(i = 0; i < nb_xmm_regs; i++) {
4645 stq(addr, env->xmm_regs[i].XMM_Q(0));
4646 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4647 addr += 16;
4648 }
eaa728ee
FB
4649 }
4650 }
4651}
4652
4653void helper_fxrstor(target_ulong ptr, int data64)
4654{
4655 int i, fpus, fptag, nb_xmm_regs;
c31da136 4656 floatx80 tmp;
eaa728ee
FB
4657 target_ulong addr;
4658
09d85fb8
KW
4659 /* The operand must be 16 byte aligned */
4660 if (ptr & 0xf) {
4661 raise_exception(EXCP0D_GPF);
4662 }
4663
eaa728ee
FB
4664 env->fpuc = lduw(ptr);
4665 fpus = lduw(ptr + 2);
4666 fptag = lduw(ptr + 4);
4667 env->fpstt = (fpus >> 11) & 7;
4668 env->fpus = fpus & ~0x3800;
4669 fptag ^= 0xff;
4670 for(i = 0;i < 8; i++) {
4671 env->fptags[i] = ((fptag >> i) & 1);
4672 }
4673
4674 addr = ptr + 0x20;
4675 for(i = 0;i < 8; i++) {
4676 tmp = helper_fldt(addr);
4677 ST(i) = tmp;
4678 addr += 16;
4679 }
4680
4681 if (env->cr[4] & CR4_OSFXSR_MASK) {
4682 /* XXX: finish it */
4683 env->mxcsr = ldl(ptr + 0x18);
4684 //ldl(ptr + 0x1c);
4685 if (env->hflags & HF_CS64_MASK)
4686 nb_xmm_regs = 16;
4687 else
4688 nb_xmm_regs = 8;
4689 addr = ptr + 0xa0;
eef26553
AL
4690 /* Fast FXRESTORE leaves out the XMM registers */
4691 if (!(env->efer & MSR_EFER_FFXSR)
4692 || (env->hflags & HF_CPL_MASK)
4693 || !(env->hflags & HF_LMA_MASK)) {
4694 for(i = 0; i < nb_xmm_regs; i++) {
4695 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4696 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4697 addr += 16;
4698 }
eaa728ee
FB
4699 }
4700 }
4701}
4702
c31da136 4703void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
eaa728ee 4704{
c31da136 4705 CPU_LDoubleU temp;
eaa728ee
FB
4706
4707 temp.d = f;
4708 *pmant = temp.l.lower;
4709 *pexp = temp.l.upper;
4710}
4711
c31da136 4712floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
eaa728ee 4713{
c31da136 4714 CPU_LDoubleU temp;
eaa728ee
FB
4715
4716 temp.l.upper = upper;
4717 temp.l.lower = mant;
4718 return temp.d;
4719}
eaa728ee
FB
4720
4721#ifdef TARGET_X86_64
4722
4723//#define DEBUG_MULDIV
4724
/* 128-bit add: (*phigh:*plow) += (b:a), with carry from low to high. */
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    uint64_t old_low = *plow;

    *plow = old_low + a;
    if (*plow < old_low) { /* unsigned wraparound => carry out */
        (*phigh)++;
    }
    *phigh += b;
}
4733
/* 128-bit two's-complement negation: complement then add one. */
static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~*plow;
    *phigh = ~*phigh;
    add128(plow, phigh, 1, 0);
}
4740
/* 128/64 unsigned division: (*phigh:*plow) / b -> quotient in *plow,
   remainder in *phigh. Returns TRUE if the quotient overflows 64 bits. */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t lo, hi;
    int i, qbit, carry;

    lo = *plow;
    hi = *phigh;
    if (hi == 0) {
        /* narrow dividend: one native divide is enough */
        *plow = lo / b;
        *phigh = lo % b;
        return 0;
    }
    if (hi >= b) {
        return 1; /* quotient would not fit in 64 bits */
    }
    /* XXX: use a better algorithm (bit-at-a-time restoring division) */
    for (i = 0; i < 64; i++) {
        carry = hi >> 63;
        hi = (hi << 1) | (lo >> 63);
        if (carry || hi >= b) {
            hi -= b;
            qbit = 1;
        } else {
            qbit = 0;
        }
        lo = (lo << 1) | qbit;
    }
#if defined(DEBUG_MULDIV)
    printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
           *phigh, *plow, b, lo, hi);
#endif
    *plow = lo;  /* quotient */
    *phigh = hi; /* remainder */
    return 0;
}
4778
/* 128/64 signed division on top of div64; returns TRUE on overflow. */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int neg_a, neg_b;

    neg_a = ((int64_t)*phigh < 0);
    if (neg_a) {
        neg128(plow, phigh);
    }
    neg_b = (b < 0);
    if (neg_b) {
        b = -b;
    }
    if (div64(plow, phigh, b) != 0) {
        return 1;
    }
    if (neg_a ^ neg_b) {
        /* negative quotient: magnitude up to 2^63 is representable */
        if (*plow > (1ULL << 63)) {
            return 1;
        }
        *plow = -*plow;
    } else {
        if (*plow >= (1ULL << 63)) {
            return 1;
        }
    }
    if (neg_a) {
        *phigh = -*phigh; /* remainder takes the dividend's sign */
    }
    return 0;
}
4803
4804void helper_mulq_EAX_T0(target_ulong t0)
4805{
4806 uint64_t r0, r1;
4807
4808 mulu64(&r0, &r1, EAX, t0);
4809 EAX = r0;
4810 EDX = r1;
4811 CC_DST = r0;
4812 CC_SRC = r1;
4813}
4814
4815void helper_imulq_EAX_T0(target_ulong t0)
4816{
4817 uint64_t r0, r1;
4818
4819 muls64(&r0, &r1, EAX, t0);
4820 EAX = r0;
4821 EDX = r1;
4822 CC_DST = r0;
4823 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4824}
4825
4826target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4827{
4828 uint64_t r0, r1;
4829
4830 muls64(&r0, &r1, t0, t1);
4831 CC_DST = r0;
4832 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4833 return r0;
4834}
4835
4836void helper_divq_EAX(target_ulong t0)
4837{
4838 uint64_t r0, r1;
4839 if (t0 == 0) {
4840 raise_exception(EXCP00_DIVZ);
4841 }
4842 r0 = EAX;
4843 r1 = EDX;
4844 if (div64(&r0, &r1, t0))
4845 raise_exception(EXCP00_DIVZ);
4846 EAX = r0;
4847 EDX = r1;
4848}
4849
4850void helper_idivq_EAX(target_ulong t0)
4851{
4852 uint64_t r0, r1;
4853 if (t0 == 0) {
4854 raise_exception(EXCP00_DIVZ);
4855 }
4856 r0 = EAX;
4857 r1 = EDX;
4858 if (idiv64(&r0, &r1, t0))
4859 raise_exception(EXCP00_DIVZ);
4860 EAX = r0;
4861 EDX = r1;
4862}
4863#endif
4864
94451178 4865static void do_hlt(void)
eaa728ee
FB
4866{
4867 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
ce5232c5 4868 env->halted = 1;
eaa728ee 4869 env->exception_index = EXCP_HLT;
1162c041 4870 cpu_loop_exit(env);
eaa728ee
FB
4871}
4872
94451178
FB
4873void helper_hlt(int next_eip_addend)
4874{
4875 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4876 EIP += next_eip_addend;
4877
4878 do_hlt();
4879}
4880
eaa728ee
FB
4881void helper_monitor(target_ulong ptr)
4882{
4883 if ((uint32_t)ECX != 0)
4884 raise_exception(EXCP0D_GPF);
4885 /* XXX: store address ? */
872929aa 4886 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
eaa728ee
FB
4887}
4888
94451178 4889void helper_mwait(int next_eip_addend)
eaa728ee
FB
4890{
4891 if ((uint32_t)ECX != 0)
4892 raise_exception(EXCP0D_GPF);
872929aa 4893 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
94451178
FB
4894 EIP += next_eip_addend;
4895
eaa728ee
FB
4896 /* XXX: not complete but not completely erroneous */
4897 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4898 /* more than one CPU: do not sleep because another CPU may
4899 wake this one */
4900 } else {
94451178 4901 do_hlt();
eaa728ee
FB
4902 }
4903}
4904
4905void helper_debug(void)
4906{
4907 env->exception_index = EXCP_DEBUG;
1162c041 4908 cpu_loop_exit(env);
eaa728ee
FB
4909}
4910
a2397807
JK
4911void helper_reset_rf(void)
4912{
4913 env->eflags &= ~RF_MASK;
4914}
4915
eaa728ee
FB
/* Raise a software interrupt (INT n) from translated code. */
void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}
4920
/* Raise a CPU exception from translated code. */
void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}
4925
4926void helper_cli(void)
4927{
4928 env->eflags &= ~IF_MASK;
4929}
4930
4931void helper_sti(void)
4932{
4933 env->eflags |= IF_MASK;
4934}
4935
#if 0
/* vm86plus instructions (disabled) */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif
4951
4952void helper_set_inhibit_irq(void)
4953{
4954 env->hflags |= HF_INHIBIT_IRQ_MASK;
4955}
4956
4957void helper_reset_inhibit_irq(void)
4958{
4959 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4960}
4961
4962void helper_boundw(target_ulong a0, int v)
4963{
4964 int low, high;
4965 low = ldsw(a0);
4966 high = ldsw(a0 + 2);
4967 v = (int16_t)v;
4968 if (v < low || v > high) {
4969 raise_exception(EXCP05_BOUND);
4970 }
eaa728ee
FB
4971}
4972
4973void helper_boundl(target_ulong a0, int v)
4974{
4975 int low, high;
4976 low = ldl(a0);
4977 high = ldl(a0 + 4);
4978 if (v < low || v > high) {
4979 raise_exception(EXCP05_BOUND);
4980 }
eaa728ee
FB
4981}
4982
eaa728ee
FB
4983#if !defined(CONFIG_USER_ONLY)
4984
4985#define MMUSUFFIX _mmu
4986
4987#define SHIFT 0
4988#include "softmmu_template.h"
4989
4990#define SHIFT 1
4991#include "softmmu_template.h"
4992
4993#define SHIFT 2
4994#include "softmmu_template.h"
4995
4996#define SHIFT 3
4997#include "softmmu_template.h"
4998
4999#endif
5000
d9957a8b 5001#if !defined(CONFIG_USER_ONLY)
eaa728ee
FB
5002/* try to fill the TLB and return an exception if error. If retaddr is
5003 NULL, it means that the function was called in C code (i.e. not
5004 from generated code or from helper.c) */
5005/* XXX: fix it to restore all registers */
bccd9ec5
BS
5006void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
5007 void *retaddr)
eaa728ee
FB
5008{
5009 TranslationBlock *tb;
5010 int ret;
5011 unsigned long pc;
5012 CPUX86State *saved_env;
5013
eaa728ee 5014 saved_env = env;
bccd9ec5 5015 env = env1;
eaa728ee 5016
97b348e7 5017 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
eaa728ee
FB
5018 if (ret) {
5019 if (retaddr) {
5020 /* now we have a real cpu fault */
5021 pc = (unsigned long)retaddr;
5022 tb = tb_find_pc(pc);
5023 if (tb) {
5024 /* the PC is inside the translated code. It means that we have
5025 a virtual CPU fault */
618ba8e6 5026 cpu_restore_state(tb, env, pc);
eaa728ee
FB
5027 }
5028 }
872929aa 5029 raise_exception_err(env->exception_index, env->error_code);
eaa728ee
FB
5030 }
5031 env = saved_env;
5032}
d9957a8b 5033#endif
eaa728ee
FB
5034
5035/* Secure Virtual Machine helpers */
5036
eaa728ee
FB
5037#if defined(CONFIG_USER_ONLY)
5038
db620f46 5039void helper_vmrun(int aflag, int next_eip_addend)
eaa728ee
FB
5040{
5041}
5042void helper_vmmcall(void)
5043{
5044}
914178d3 5045void helper_vmload(int aflag)
eaa728ee
FB
5046{
5047}
914178d3 5048void helper_vmsave(int aflag)
eaa728ee
FB
5049{
5050}
872929aa
FB
5051void helper_stgi(void)
5052{
5053}
5054void helper_clgi(void)
5055{
5056}
eaa728ee
FB
5057void helper_skinit(void)
5058{
5059}
914178d3 5060void helper_invlpga(int aflag)
eaa728ee
FB
5061{
5062}
5063void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5064{
5065}
5066void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5067{
5068}
5069
e694d4e2
BS
5070void svm_check_intercept(CPUState *env1, uint32_t type)
5071{
5072}
5073
eaa728ee
FB
5074void helper_svm_check_io(uint32_t port, uint32_t param,
5075 uint32_t next_eip_addend)
5076{
5077}
5078#else
5079
c227f099 5080static inline void svm_save_seg(target_phys_addr_t addr,
872929aa 5081 const SegmentCache *sc)
eaa728ee 5082{
872929aa
FB
5083 stw_phys(addr + offsetof(struct vmcb_seg, selector),
5084 sc->selector);
5085 stq_phys(addr + offsetof(struct vmcb_seg, base),
5086 sc->base);
5087 stl_phys(addr + offsetof(struct vmcb_seg, limit),
5088 sc->limit);
5089 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
e72210e1 5090 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
872929aa
FB
5091}
5092
c227f099 5093static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
872929aa
FB
5094{
5095 unsigned int flags;
5096
5097 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
5098 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
5099 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
5100 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
5101 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
eaa728ee
FB
5102}
5103
c227f099 5104static inline void svm_load_seg_cache(target_phys_addr_t addr,
872929aa 5105 CPUState *env, int seg_reg)
eaa728ee 5106{
872929aa
FB
5107 SegmentCache sc1, *sc = &sc1;
5108 svm_load_seg(addr, sc);
5109 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
5110 sc->base, sc->limit, sc->flags);
eaa728ee
FB
5111}
5112
db620f46 5113void helper_vmrun(int aflag, int next_eip_addend)
eaa728ee
FB
5114{
5115 target_ulong addr;
5116 uint32_t event_inj;
5117 uint32_t int_ctl;
5118
872929aa
FB
5119 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
5120
914178d3
FB
5121 if (aflag == 2)
5122 addr = EAX;
5123 else
5124 addr = (uint32_t)EAX;
5125
93fcfe39 5126 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
eaa728ee
FB
5127
5128 env->vm_vmcb = addr;
5129
5130 /* save the current CPU state in the hsave page */
5131 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5132 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5133
5134 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5135 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5136
5137 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
5138 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
5139 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
5140 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
eaa728ee
FB
5141 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
5142 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
5143
5144 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
5145 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
5146
872929aa
FB
5147 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
5148 &env->segs[R_ES]);
5149 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
5150 &env->segs[R_CS]);
5151 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
5152 &env->segs[R_SS]);
5153 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
5154 &env->segs[R_DS]);
eaa728ee 5155
db620f46
FB
5156 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
5157 EIP + next_eip_addend);
eaa728ee
FB
5158 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
5159 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
5160
5161 /* load the interception bitmaps so we do not need to access the
5162 vmcb in svm mode */
872929aa 5163 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
eaa728ee
FB
5164 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
5165 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
5166 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
5167 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
5168 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
5169
872929aa
FB
5170 /* enable intercepts */
5171 env->hflags |= HF_SVMI_MASK;
5172
33c263df
FB
5173 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
5174
eaa728ee
FB
5175 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
5176 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
5177
5178 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
5179 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
5180
5181 /* clear exit_info_2 so we behave like the real hardware */
5182 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
5183
5184 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
5185 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
5186 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
5187 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
5188 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
db620f46 5189 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
eaa728ee 5190 if (int_ctl & V_INTR_MASKING_MASK) {
db620f46
FB
5191 env->v_tpr = int_ctl & V_TPR_MASK;
5192 env->hflags2 |= HF2_VINTR_MASK;
eaa728ee 5193 if (env->eflags & IF_MASK)
db620f46 5194 env->hflags2 |= HF2_HIF_MASK;
eaa728ee
FB
5195 }
5196
5efc27bb
FB
5197 cpu_load_efer(env,
5198 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
eaa728ee
FB
5199 env->eflags = 0;
5200 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
5201 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5202 CC_OP = CC_OP_EFLAGS;
eaa728ee 5203
872929aa
FB
5204 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
5205 env, R_ES);
5206 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5207 env, R_CS);
5208 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5209 env, R_SS);
5210 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5211 env, R_DS);
eaa728ee
FB
5212
5213 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
5214 env->eip = EIP;
5215 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
5216 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
5217 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
5218 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
5219 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5220
5221 /* FIXME: guest state consistency checks */
5222
5223 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5224 case TLB_CONTROL_DO_NOTHING:
5225 break;
5226 case TLB_CONTROL_FLUSH_ALL_ASID:
5227 /* FIXME: this is not 100% correct but should work for now */
5228 tlb_flush(env, 1);
5229 break;
5230 }
5231
960540b4 5232 env->hflags2 |= HF2_GIF_MASK;
eaa728ee 5233
db620f46
FB
5234 if (int_ctl & V_IRQ_MASK) {
5235 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5236 }
5237
eaa728ee
FB
5238 /* maybe we need to inject an event */
5239 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5240 if (event_inj & SVM_EVTINJ_VALID) {
5241 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5242 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5243 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
eaa728ee 5244
93fcfe39 5245 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
eaa728ee
FB
5246 /* FIXME: need to implement valid_err */
5247 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5248 case SVM_EVTINJ_TYPE_INTR:
5249 env->exception_index = vector;
5250 env->error_code = event_inj_err;
5251 env->exception_is_int = 0;
5252 env->exception_next_eip = -1;
93fcfe39 5253 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
db620f46 5254 /* XXX: is it always correct ? */
e694d4e2 5255 do_interrupt_all(vector, 0, 0, 0, 1);
eaa728ee
FB
5256 break;
5257 case SVM_EVTINJ_TYPE_NMI:
db620f46 5258 env->exception_index = EXCP02_NMI;
eaa728ee
FB
5259 env->error_code = event_inj_err;
5260 env->exception_is_int = 0;
5261 env->exception_next_eip = EIP;
93fcfe39 5262 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
1162c041 5263 cpu_loop_exit(env);
eaa728ee
FB
5264 break;
5265 case SVM_EVTINJ_TYPE_EXEPT:
5266 env->exception_index = vector;
5267 env->error_code = event_inj_err;
5268 env->exception_is_int = 0;
5269 env->exception_next_eip = -1;
93fcfe39 5270 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
1162c041 5271 cpu_loop_exit(env);
eaa728ee
FB
5272 break;
5273 case SVM_EVTINJ_TYPE_SOFT:
5274 env->exception_index = vector;
5275 env->error_code = event_inj_err;
5276 env->exception_is_int = 1;
5277 env->exception_next_eip = EIP;
93fcfe39 5278 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
1162c041 5279 cpu_loop_exit(env);
eaa728ee
FB
5280 break;
5281 }
93fcfe39 5282 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
eaa728ee 5283 }
eaa728ee
FB
5284}
5285
5286void helper_vmmcall(void)
5287{
872929aa
FB
5288 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5289 raise_exception(EXCP06_ILLOP);
eaa728ee
FB
5290}
5291
914178d3 5292void helper_vmload(int aflag)
eaa728ee
FB
5293{
5294 target_ulong addr;
872929aa
FB
5295 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5296
914178d3
FB
5297 if (aflag == 2)
5298 addr = EAX;
5299 else
5300 addr = (uint32_t)EAX;
5301
93fcfe39 5302 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
eaa728ee
FB
5303 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5304 env->segs[R_FS].base);
5305
872929aa
FB
5306 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5307 env, R_FS);
5308 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5309 env, R_GS);
5310 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5311 &env->tr);
5312 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5313 &env->ldt);
eaa728ee
FB
5314
5315#ifdef TARGET_X86_64
5316 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5317 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5318 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5319 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5320#endif
5321 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5322 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5323 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5324 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5325}
5326
914178d3 5327void helper_vmsave(int aflag)
eaa728ee
FB
5328{
5329 target_ulong addr;
872929aa 5330 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
914178d3
FB
5331
5332 if (aflag == 2)
5333 addr = EAX;
5334 else
5335 addr = (uint32_t)EAX;
5336
93fcfe39 5337 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
eaa728ee
FB
5338 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5339 env->segs[R_FS].base);
5340
872929aa
FB
5341 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5342 &env->segs[R_FS]);
5343 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5344 &env->segs[R_GS]);
5345 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5346 &env->tr);
5347 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5348 &env->ldt);
eaa728ee
FB
5349
5350#ifdef TARGET_X86_64
5351 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5352 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5353 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5354 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5355#endif
5356 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5357 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5358 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5359 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5360}
5361
872929aa
FB
5362void helper_stgi(void)
5363{
5364 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
db620f46 5365 env->hflags2 |= HF2_GIF_MASK;
872929aa
FB
5366}
5367
5368void helper_clgi(void)
5369{
5370 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
db620f46 5371 env->hflags2 &= ~HF2_GIF_MASK;
872929aa
FB
5372}
5373
eaa728ee
FB
5374void helper_skinit(void)
5375{
872929aa
FB
5376 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5377 /* XXX: not implemented */
872929aa 5378 raise_exception(EXCP06_ILLOP);
eaa728ee
FB
5379}
5380
914178d3 5381void helper_invlpga(int aflag)
eaa728ee 5382{
914178d3 5383 target_ulong addr;
872929aa 5384 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
914178d3
FB
5385
5386 if (aflag == 2)
5387 addr = EAX;
5388 else
5389 addr = (uint32_t)EAX;
5390
5391 /* XXX: could use the ASID to see if it is needed to do the
5392 flush */
5393 tlb_flush_page(env, addr);
eaa728ee
FB
5394}
5395
5396void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5397{
872929aa
FB
5398 if (likely(!(env->hflags & HF_SVMI_MASK)))
5399 return;
eaa728ee
FB
5400 switch(type) {
5401 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
872929aa 5402 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
eaa728ee
FB
5403 helper_vmexit(type, param);
5404 }
5405 break;
872929aa
FB
5406 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5407 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
eaa728ee
FB
5408 helper_vmexit(type, param);
5409 }
5410 break;
872929aa
FB
5411 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5412 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
eaa728ee
FB
5413 helper_vmexit(type, param);
5414 }
5415 break;
872929aa
FB
5416 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5417 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
eaa728ee
FB
5418 helper_vmexit(type, param);
5419 }
5420 break;
872929aa
FB
5421 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5422 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
eaa728ee
FB
5423 helper_vmexit(type, param);
5424 }
5425 break;
eaa728ee 5426 case SVM_EXIT_MSR:
872929aa 5427 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
eaa728ee
FB
5428 /* FIXME: this should be read in at vmrun (faster this way?) */
5429 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5430 uint32_t t0, t1;
5431 switch((uint32_t)ECX) {
5432 case 0 ... 0x1fff:
5433 t0 = (ECX * 2) % 8;
583cd3cb 5434 t1 = (ECX * 2) / 8;
eaa728ee
FB
5435 break;
5436 case 0xc0000000 ... 0xc0001fff:
5437 t0 = (8192 + ECX - 0xc0000000) * 2;
5438 t1 = (t0 / 8);
5439 t0 %= 8;
5440 break;
5441 case 0xc0010000 ... 0xc0011fff:
5442 t0 = (16384 + ECX - 0xc0010000) * 2;
5443 t1 = (t0 / 8);
5444 t0 %= 8;
5445 break;
5446 default:
5447 helper_vmexit(type, param);
5448 t0 = 0;
5449 t1 = 0;
5450 break;
5451 }
5452 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5453 helper_vmexit(type, param);
5454 }
5455 break;
5456 default:
872929aa 5457 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
eaa728ee
FB
5458 helper_vmexit(type, param);
5459 }
5460 break;
5461 }
5462}
5463
e694d4e2
BS
5464void svm_check_intercept(CPUState *env1, uint32_t type)
5465{
5466 CPUState *saved_env;
5467
5468 saved_env = env;
5469 env = env1;
5470 helper_svm_check_intercept_param(type, 0);
5471 env = saved_env;
5472}
5473
eaa728ee
FB
5474void helper_svm_check_io(uint32_t port, uint32_t param,
5475 uint32_t next_eip_addend)
5476{
872929aa 5477 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
eaa728ee
FB
5478 /* FIXME: this should be read in at vmrun (faster this way?) */
5479 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5480 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5481 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5482 /* next EIP */
5483 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5484 env->eip + next_eip_addend);
5485 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5486 }
5487 }
5488}
5489
/* Note: currently only 32 bits of exit_code are used */
/* Perform a #VMEXIT: save the current guest state into the guest VMCB,
   reload the host state previously stashed in vm_hsave by vmrun, record
   the exit reason/info in the VMCB control area, and jump back to the
   main CPU loop.  Does not return. */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    /* Record (and clear) the interrupt-shadow state in the VMCB so the
       host/hypervisor can see whether an STI/MOV SS shadow was active. */
    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    /* Write the virtual interrupt state (V_TPR, pending V_IRQ) back into
       the VMCB control area. */
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    /* Report the event that was being injected (if any) as the exit
       interrupt info, then clear the injection field. */
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host.) */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    /* does not return: re-enters the CPU execution loop in host context */
    cpu_loop_exit(env);
}
5627
5628#endif
5629
5630/* MMX/SSE */
5631/* XXX: optimize by storing fptt and fptags in the static cpu state */
5632void helper_enter_mmx(void)
5633{
5634 env->fpstt = 0;
5635 *(uint32_t *)(env->fptags) = 0;
5636 *(uint32_t *)(env->fptags + 4) = 0;
5637}
5638
5639void helper_emms(void)
5640{
5641 /* set to empty state */
5642 *(uint32_t *)(env->fptags) = 0x01010101;
5643 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5644}
5645
/* XXX: suppress */
/* Copy one 64-bit (MMX/low-SSE) value from s to d.  memcpy avoids the
   alignment and strict-aliasing hazards of dereferencing the void
   pointers as uint64_t directly; compilers lower it to a single move. */
void helper_movq(void *d, void *s)
{
    memcpy(d, s, sizeof(uint64_t));
}
5651
5652#define SHIFT 0
5653#include "ops_sse.h"
5654
5655#define SHIFT 1
5656#include "ops_sse.h"
5657
5658#define SHIFT 0
5659#include "helper_template.h"
5660#undef SHIFT
5661
5662#define SHIFT 1
5663#include "helper_template.h"
5664#undef SHIFT
5665
5666#define SHIFT 2
5667#include "helper_template.h"
5668#undef SHIFT
5669
5670#ifdef TARGET_X86_64
5671
5672#define SHIFT 3
5673#include "helper_template.h"
5674#undef SHIFT
5675
5676#endif
5677
5678/* bit operations */
5679target_ulong helper_bsf(target_ulong t0)
5680{
5681 int count;
5682 target_ulong res;
5683
5684 res = t0;
5685 count = 0;
5686 while ((res & 1) == 0) {
5687 count++;
5688 res >>= 1;
5689 }
5690 return count;
5691}
5692
31501a71 5693target_ulong helper_lzcnt(target_ulong t0, int wordsize)
eaa728ee
FB
5694{
5695 int count;
5696 target_ulong res, mask;
31501a71
AP
5697
5698 if (wordsize > 0 && t0 == 0) {
5699 return wordsize;
5700 }
eaa728ee
FB
5701 res = t0;
5702 count = TARGET_LONG_BITS - 1;
5703 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5704 while ((res & mask) == 0) {
5705 count--;
5706 res <<= 1;
5707 }
31501a71
AP
5708 if (wordsize > 0) {
5709 return wordsize - 1 - count;
5710 }
eaa728ee
FB
5711 return count;
5712}
5713
31501a71
AP
/* Bit scan reverse: index of the most significant set bit of t0.
   Delegates to helper_lzcnt in BSR mode (wordsize == 0); t0 must be
   non-zero. */
target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}
eaa728ee
FB
5718
/* CC_OP_EFLAGS: the flags are already materialized in CC_SRC, so just
   return them. */
static int compute_all_eflags(void)
{
    return CC_SRC;
}
5723
/* CC_OP_EFLAGS: extract only the carry flag from the materialized
   flags in CC_SRC. */
static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
5728
a7812ae4
PB
/* Compute the full EFLAGS condition-code bits from the lazily saved
   CC_SRC/CC_DST state.  'op' is the CC_OP_* constant recording which
   operation last set the flags; the B/W/L/Q suffix selects the operand
   width, dispatching to the width-specialized helpers generated from
   helper_template.h. */
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}
5799
e694d4e2
BS
5800uint32_t cpu_cc_compute_all(CPUState *env1, int op)
5801{
5802 CPUState *saved_env;
5803 uint32_t ret;
5804
5805 saved_env = env;
5806 env = env1;
5807 ret = helper_cc_compute_all(op);
5808 env = saved_env;
5809 return ret;
5810}
5811
a7812ae4
PB
/* Compute only the carry flag from the lazily saved CC_SRC/CC_DST
   state.  'op' is the CC_OP_* constant of the last flag-setting
   operation.  Where carry does not depend on operand width (MUL, SAR,
   INC/DEC) a single shared helper is reused for all widths. */
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    /* INC/DEC never modify carry, so one helper serves every width */
    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}