/*
 * i386 helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <math.h>
#include "cpu.h"
#include "dyngen-exec.h"
#include "host-utils.h"
#include "ioport.h"
#include "qemu-log.h"
#include "cpu-defs.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif

/* n must be a constant to be efficient */
static inline target_long lshift(target_long x, int n)
{
    if (n >= 0) {
        return x << n;
    } else {
        return x >> (-n);
    }
}

#define FPU_RC_MASK 0xc00
#define FPU_RC_NEAR 0x000
#define FPU_RC_DOWN 0x400
#define FPU_RC_UP 0x800
#define FPU_RC_CHOP 0xc00

#define MAXTAN 9223372036854775808.0

/* the following deal with x86 long double-precision numbers */
#define MAXEXPD 0x7fff
#define EXPBIAS 16383
#define EXPD(fp) (fp.l.upper & 0x7fff)
#define SIGND(fp) ((fp.l.upper) & 0x8000)
#define MANTD(fp) (fp.l.lower)
#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS

static inline void fpush(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fptags[env->fpstt] = 0; /* validate stack entry */
}

static inline void fpop(void)
{
    env->fptags[env->fpstt] = 1; /* invalidate stack entry */
    env->fpstt = (env->fpstt + 1) & 7;
}

static inline floatx80 helper_fldt(target_ulong ptr)
{
    CPU_LDoubleU temp;

    temp.l.lower = ldq(ptr);
    temp.l.upper = lduw(ptr + 8);
    return temp.d;
}

static inline void helper_fstt(floatx80 f, target_ulong ptr)
{
    CPU_LDoubleU temp;

    temp.d = f;
    stq(ptr, temp.l.lower);
    stw(ptr + 8, temp.l.upper);
}

#define FPUS_IE (1 << 0)
#define FPUS_DE (1 << 1)
#define FPUS_ZE (1 << 2)
#define FPUS_OE (1 << 3)
#define FPUS_UE (1 << 4)
#define FPUS_PE (1 << 5)
#define FPUS_SF (1 << 6)
#define FPUS_SE (1 << 7)
#define FPUS_B (1 << 15)

#define FPUC_EM 0x3f

static inline uint32_t compute_eflags(void)
{
    return env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
}

/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */
static inline void load_eflags(int eflags, int update_mask)
{
    CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((eflags >> 10) & 1));
    env->eflags = (env->eflags & ~update_mask) |
        (eflags & update_mask) | 0x2;
}
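
/* Illustrative sketch (hypothetical helper, kept under #if 0): EFLAGS
   bit 10 is DF, but env stores it as the string-operation stride, and
   that stride is what the "1 - 2*bit" expression above computes. */
#if 0
static inline int df_from_eflags(uint32_t eflags)
{
    return 1 - (2 * ((eflags >> 10) & 1)); /* bit clear -> +1, bit set -> -1 */
}
#endif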

/* load efer and update the corresponding hflags. XXX: do consistency
   checks with cpuid bits ? */
static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
{
    env->efer = val;
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
    if (env->efer & MSR_EFER_LMA) {
        env->hflags |= HF_LMA_MASK;
    }
    if (env->efer & MSR_EFER_SVME) {
        env->hflags |= HF_SVME_MASK;
    }
}

#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

static void QEMU_NORETURN raise_exception_err(int exception_index,
                                              int error_code);

static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
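
/* Illustrative sketch (hypothetical helper, kept under #if 0): each
   entry of parity_table is CC_P exactly when its 8-bit index contains
   an even number of set bits, which is the x86 definition of PF. */
#if 0
static int parity_flag(uint8_t v)
{
    return (__builtin_popcount(v) & 1) ? 0 : CC_P; /* even parity -> CC_P */
}
#endif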

/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

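/* Illustrative sketch (hypothetical helpers, kept under #if 0): RCL
   rotates through CF, i.e. over size+1 bits, so a 16-bit RCL by c is
   a rotate by c % 17 and an 8-bit RCL by c % 9; the tables above just
   precompute those remainders for the masked counts 0..31. */
#if 0
static int rclw_count(int c) { return (c & 0x1f) % 17; } /* == rclw_table[c & 0x1f] */
static int rclb_count(int c) { return (c & 0x1f) % 9; }  /* == rclb_table[c & 0x1f] */
#endif
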
#define floatx80_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL )
#define floatx80_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL )
#define floatx80_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL )

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
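
/* Worked example (kept under #if 0, descriptor words assumed): a flat
   4 GiB data segment with e1 = 0x0000ffff, e2 = 0x00cf9300 has the
   granularity bit set, so the raw limit 0xfffff is scaled to
   (0xfffff << 12) | 0xfff = 0xffffffff and the base decodes to 0. */
#if 0
uint32_t e1 = 0x0000ffff, e2 = 0x00cf9300;
assert(get_seg_base(e1, e2) == 0);
assert(get_seg_limit(e1, e2) == 0xffffffff);
#endif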

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
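
/* Sketch of the offsets computed above for a 32-bit TSS (shift == 1):
   index = (dpl * 4 + 2) << 1 = dpl * 8 + 4, so ESP0/SS0 sit at TSS
   offsets 4/8, ESP1/SS1 at 12/16, and ESP2/SS2 at 20/24. */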

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
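
/* Worked example (values assumed): an "outb" to port 0x3f8 tests bit
   0x3f8 & 7 = 0 of the bitmap byte at io_offset + (0x3f8 >> 3) =
   io_offset + 0x7f; a 4-byte access to port 6 would test bits 6..9,
   which straddles a byte boundary, hence the 16-bit load above. */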

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

static int exeption_has_error_code(int intno)
{
    switch(intno) {
    case 8:  /* #DF */
    case 10: /* #TS */
    case 11: /* #NP */
    case 12: /* #SS */
    case 13: /* #GP */
    case 14: /* #PF */
    case 17: /* #AC */
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
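
/* Worked example (values assumed): with ssp = 0xfffff000 and
   sp & sp_mask = 0x2000, the sum 0x100001000 exceeds 32 bits on a
   64-bit host; SEG_ADDL truncates it back to 0x00001000, matching
   the address wrap of a real 32-bit CPU. */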

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
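
/* Sketch of the frame built above for a 32-bit inner-privilege
   interrupt (new_stack != 0, no vm86), from higher to lower
   addresses on the new stack:
       SS, ESP, EFLAGS, CS, EIP [, error code]
   with GS, FS, DS and ES pushed first when coming from vm86 mode. */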

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
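
/* Sketch of the 64-bit TSS layout assumed above: RSPn for privilege
   levels 0-2 lives at offset 8*n + 4, and ISTn (n = 1..7) at offset
   8*(n + 3) + 4, which is why the callers pass "ist + 3". */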

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(int intno, int is_int, int error_code,
                              target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exit the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

#else

static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exeption_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(int intno, int is_int, int error_code,
                             target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void do_interrupt(CPUX86State *env1)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;
#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(env->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
    env = saved_env;
}

void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;
    do_interrupt_all(intno, 0, 0, 0, is_hw);
    env = saved_env;
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
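
/* Worked example (vectors only): a #GP (13, contributory) raised
   while delivering a #NP (11, contributory) is promoted to #DF (8)
   with error code 0; any further exception while delivering the #DF
   takes the triple-fault shutdown path above. */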

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}

/* shortcuts to generate exceptions */

static void QEMU_NORETURN raise_exception_err(int exception_index,
                                              int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception_err_env(CPUX86State *nenv, int exception_index,
                             int error_code)
{
    env = nenv;
    raise_interrupt(exception_index, 0, error_code, 0);
}

static void QEMU_NORETURN raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

void raise_exception_env(int exception_index, CPUX86State *nenv)
{
    env = nenv;
    raise_exception(exception_index);
}
/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(CPUX86State *env1)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(CPUX86State *env1)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
    env = saved_env;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */


/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
1898
1899/* bcd */
1900
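/* AAM divides AL by the immediate base (10 for the standard encoding),
   putting the quotient in AH and the remainder in AL; AAD folds AH
   back in as AH * base + AL and clears AH. The XXX below refers to the
   missing fault: hardware raises #DE when the AAM immediate is zero,
   which this helper does not model. */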
1901/* XXX: exception */
1902void helper_aam(int base)
1903{
1904 int al, ah;
1905 al = EAX & 0xff;
1906 ah = al / base;
1907 al = al % base;
1908 EAX = (EAX & ~0xffff) | al | (ah << 8);
1909 CC_DST = al;
1910}
1911
1912void helper_aad(int base)
1913{
1914 int al, ah;
1915 al = EAX & 0xff;
1916 ah = (EAX >> 8) & 0xff;
1917 al = ((ah * base) + al) & 0xff;
1918 EAX = (EAX & ~0xffff) | al;
1919 CC_DST = al;
1920}
1921
1922void helper_aaa(void)
1923{
1924 int icarry;
1925 int al, ah, af;
1926 int eflags;
1927
1928    eflags = helper_cc_compute_all(CC_OP);
1929 af = eflags & CC_A;
1930 al = EAX & 0xff;
1931 ah = (EAX >> 8) & 0xff;
1932
1933 icarry = (al > 0xf9);
1934    if (((al & 0x0f) > 9) || af) {
1935 al = (al + 6) & 0x0f;
1936 ah = (ah + 1 + icarry) & 0xff;
1937 eflags |= CC_C | CC_A;
1938 } else {
1939 eflags &= ~(CC_C | CC_A);
1940 al &= 0x0f;
1941 }
1942 EAX = (EAX & ~0xffff) | al | (ah << 8);
1943 CC_SRC = eflags;
1944}
1945
1946void helper_aas(void)
1947{
1948 int icarry;
1949 int al, ah, af;
1950 int eflags;
1951
1952    eflags = helper_cc_compute_all(CC_OP);
1953 af = eflags & CC_A;
1954 al = EAX & 0xff;
1955 ah = (EAX >> 8) & 0xff;
1956
1957 icarry = (al < 6);
1958    if (((al & 0x0f) > 9) || af) {
1959 al = (al - 6) & 0x0f;
1960 ah = (ah - 1 - icarry) & 0xff;
1961 eflags |= CC_C | CC_A;
1962 } else {
1963 eflags &= ~(CC_C | CC_A);
1964 al &= 0x0f;
1965 }
1966 EAX = (EAX & ~0xffff) | al | (ah << 8);
1967 CC_SRC = eflags;
1968}
1969
1970void helper_daa(void)
1971{
1972    int old_al, al, af, cf;
1973 int eflags;
1974
1975    eflags = helper_cc_compute_all(CC_OP);
1976 cf = eflags & CC_C;
1977 af = eflags & CC_A;
1978    old_al = al = EAX & 0xff;
1979
1980 eflags = 0;
1981    if (((al & 0x0f) > 9) || af) {
1982 al = (al + 6) & 0xff;
1983 eflags |= CC_A;
1984 }
1985    if ((old_al > 0x99) || cf) {
1986 al = (al + 0x60) & 0xff;
1987 eflags |= CC_C;
1988 }
1989 EAX = (EAX & ~0xff) | al;
1990 /* well, speed is not an issue here, so we compute the flags by hand */
1991 eflags |= (al == 0) << 6; /* zf */
1992 eflags |= parity_table[al]; /* pf */
1993 eflags |= (al & 0x80); /* sf */
1994 CC_SRC = eflags;
1995}
1996
1997void helper_das(void)
1998{
1999 int al, al1, af, cf;
2000 int eflags;
2001
2002    eflags = helper_cc_compute_all(CC_OP);
2003 cf = eflags & CC_C;
2004 af = eflags & CC_A;
2005 al = EAX & 0xff;
2006
2007 eflags = 0;
2008 al1 = al;
2009    if (((al & 0x0f) > 9) || af) {
2010 eflags |= CC_A;
2011 if (al < 6 || cf)
2012 eflags |= CC_C;
2013 al = (al - 6) & 0xff;
2014 }
2015 if ((al1 > 0x99) || cf) {
2016 al = (al - 0x60) & 0xff;
2017 eflags |= CC_C;
2018 }
2019 EAX = (EAX & ~0xff) | al;
2020 /* well, speed is not an issue here, so we compute the flags by hand */
2021 eflags |= (al == 0) << 6; /* zf */
2022 eflags |= parity_table[al]; /* pf */
2023 eflags |= (al & 0x80); /* sf */
2024 CC_SRC = eflags;
2025}
2026
2027void helper_into(int next_eip_addend)
2028{
2029 int eflags;
2030    eflags = helper_cc_compute_all(CC_OP);
2031 if (eflags & CC_O) {
2032 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
2033 }
2034}
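/* INTO raises the overflow exception (vector 4) only if OF is set;
   next_eip_addend lets raise_interrupt() report the address of the
   following instruction as the return EIP. */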
2035
2036void helper_cmpxchg8b(target_ulong a0)
2037{
2038 uint64_t d;
2039 int eflags;
2040
2041    eflags = helper_cc_compute_all(CC_OP);
2042 d = ldq(a0);
2043 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
2044 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
2045 eflags |= CC_Z;
2046 } else {
2047 /* always do the store */
2048 stq(a0, d);
2049 EDX = (uint32_t)(d >> 32);
2050 EAX = (uint32_t)d;
2051 eflags &= ~CC_Z;
2052 }
2053 CC_SRC = eflags;
2054}
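/* CMPXCHG8B: if the 64-bit memory operand equals EDX:EAX, ECX:EBX is
   stored and ZF is set; otherwise the old value is loaded into EDX:EAX
   and ZF is cleared. The store on the failure path is deliberate: the
   instruction always performs a write cycle, presumably so that faults
   and memory access behaviour match hardware. */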
2055
2056#ifdef TARGET_X86_64
2057void helper_cmpxchg16b(target_ulong a0)
2058{
2059 uint64_t d0, d1;
2060 int eflags;
2061
2062 if ((a0 & 0xf) != 0)
2063 raise_exception(EXCP0D_GPF);
2064    eflags = helper_cc_compute_all(CC_OP);
2065 d0 = ldq(a0);
2066 d1 = ldq(a0 + 8);
2067 if (d0 == EAX && d1 == EDX) {
2068 stq(a0, EBX);
2069 stq(a0 + 8, ECX);
2070 eflags |= CC_Z;
2071 } else {
2072 /* always do the store */
2073 stq(a0, d0);
2074 stq(a0 + 8, d1);
2075 EDX = d1;
2076 EAX = d0;
2077 eflags &= ~CC_Z;
2078 }
2079 CC_SRC = eflags;
2080}
2081#endif
2082
2083void helper_single_step(void)
2084{
2085#ifndef CONFIG_USER_ONLY
2086 check_hw_breakpoints(env, 1);
2087 env->dr[6] |= DR6_BS;
2088#endif
2089 raise_exception(EXCP01_DB);
2090}
2091
2092void helper_cpuid(void)
2093{
2094    uint32_t eax, ebx, ecx, edx;
2095
2096    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
2097
2098    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
2099 EAX = eax;
2100 EBX = ebx;
2101 ECX = ecx;
2102 EDX = edx;
2103}
2104
2105void helper_enter_level(int level, int data32, target_ulong t1)
2106{
2107 target_ulong ssp;
2108 uint32_t esp_mask, esp, ebp;
2109
2110 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2111 ssp = env->segs[R_SS].base;
2112 ebp = EBP;
2113 esp = ESP;
2114 if (data32) {
2115 /* 32 bit */
2116 esp -= 4;
2117 while (--level) {
2118 esp -= 4;
2119 ebp -= 4;
2120 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2121 }
2122 esp -= 4;
2123 stl(ssp + (esp & esp_mask), t1);
2124 } else {
2125 /* 16 bit */
2126 esp -= 2;
2127 while (--level) {
2128 esp -= 2;
2129 ebp -= 2;
2130 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2131 }
2132 esp -= 2;
2133 stw(ssp + (esp & esp_mask), t1);
2134 }
2135}
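/* This implements the nesting-level part of ENTER: level-1 saved frame
   pointers are copied down from the old frame and t1 is then pushed as
   the new frame pointer. Note that only the stack stores happen here;
   the final ESP/EBP register updates are done elsewhere. */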
2136
2137#ifdef TARGET_X86_64
2138void helper_enter64_level(int level, int data64, target_ulong t1)
2139{
2140 target_ulong esp, ebp;
2141 ebp = EBP;
2142 esp = ESP;
2143
2144 if (data64) {
2145 /* 64 bit */
2146 esp -= 8;
2147 while (--level) {
2148 esp -= 8;
2149 ebp -= 8;
2150 stq(esp, ldq(ebp));
2151 }
2152 esp -= 8;
2153 stq(esp, t1);
2154 } else {
2155 /* 16 bit */
2156 esp -= 2;
2157 while (--level) {
2158 esp -= 2;
2159 ebp -= 2;
2160 stw(esp, lduw(ebp));
2161 }
2162 esp -= 2;
2163 stw(esp, t1);
2164 }
2165}
2166#endif
2167
2168void helper_lldt(int selector)
2169{
2170 SegmentCache *dt;
2171 uint32_t e1, e2;
2172 int index, entry_limit;
2173 target_ulong ptr;
2174
2175 selector &= 0xffff;
2176 if ((selector & 0xfffc) == 0) {
2177 /* XXX: NULL selector case: invalid LDT */
2178 env->ldt.base = 0;
2179 env->ldt.limit = 0;
2180 } else {
2181 if (selector & 0x4)
2182 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2183 dt = &env->gdt;
2184 index = selector & ~7;
2185#ifdef TARGET_X86_64
2186 if (env->hflags & HF_LMA_MASK)
2187 entry_limit = 15;
2188 else
2189#endif
2190 entry_limit = 7;
2191 if ((index + entry_limit) > dt->limit)
2192 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2193 ptr = dt->base + index;
2194 e1 = ldl_kernel(ptr);
2195 e2 = ldl_kernel(ptr + 4);
2196 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2197 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2198 if (!(e2 & DESC_P_MASK))
2199 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2200#ifdef TARGET_X86_64
2201 if (env->hflags & HF_LMA_MASK) {
2202 uint32_t e3;
2203 e3 = ldl_kernel(ptr + 8);
2204 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2205 env->ldt.base |= (target_ulong)e3 << 32;
2206 } else
2207#endif
2208 {
2209 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2210 }
2211 }
2212 env->ldt.selector = selector;
2213}
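/* In long mode, LDT and TSS descriptors are 16 bytes (the third dword
   holds base bits 63:32), hence entry_limit of 15 instead of 7 above
   and the extra ldl_kernel(ptr + 8) for the high part of the base. */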
2214
2215void helper_ltr(int selector)
2216{
2217 SegmentCache *dt;
2218 uint32_t e1, e2;
2219 int index, type, entry_limit;
2220 target_ulong ptr;
2221
2222 selector &= 0xffff;
2223 if ((selector & 0xfffc) == 0) {
2224 /* NULL selector case: invalid TR */
2225 env->tr.base = 0;
2226 env->tr.limit = 0;
2227 env->tr.flags = 0;
2228 } else {
2229 if (selector & 0x4)
2230 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2231 dt = &env->gdt;
2232 index = selector & ~7;
2233#ifdef TARGET_X86_64
2234 if (env->hflags & HF_LMA_MASK)
2235 entry_limit = 15;
2236 else
2237#endif
2238 entry_limit = 7;
2239 if ((index + entry_limit) > dt->limit)
2240 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2241 ptr = dt->base + index;
2242 e1 = ldl_kernel(ptr);
2243 e2 = ldl_kernel(ptr + 4);
2244 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2245 if ((e2 & DESC_S_MASK) ||
2246 (type != 1 && type != 9))
2247 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2248 if (!(e2 & DESC_P_MASK))
2249 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2250#ifdef TARGET_X86_64
2251 if (env->hflags & HF_LMA_MASK) {
2252 uint32_t e3, e4;
2253 e3 = ldl_kernel(ptr + 8);
2254 e4 = ldl_kernel(ptr + 12);
2255 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2256 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2257 load_seg_cache_raw_dt(&env->tr, e1, e2);
2258 env->tr.base |= (target_ulong)e3 << 32;
2259 } else
2260#endif
2261 {
2262 load_seg_cache_raw_dt(&env->tr, e1, e2);
2263 }
2264 e2 |= DESC_TSS_BUSY_MASK;
2265 stl_kernel(ptr + 4, e2);
2266 }
2267 env->tr.selector = selector;
2268}
2269
2270/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2271void helper_load_seg(int seg_reg, int selector)
2272{
2273 uint32_t e1, e2;
2274 int cpl, dpl, rpl;
2275 SegmentCache *dt;
2276 int index;
2277 target_ulong ptr;
2278
2279 selector &= 0xffff;
2280 cpl = env->hflags & HF_CPL_MASK;
2281 if ((selector & 0xfffc) == 0) {
2282 /* null selector case */
2283 if (seg_reg == R_SS
2284#ifdef TARGET_X86_64
2285 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2286#endif
2287 )
2288 raise_exception_err(EXCP0D_GPF, 0);
2289 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2290 } else {
2291
2292 if (selector & 0x4)
2293 dt = &env->ldt;
2294 else
2295 dt = &env->gdt;
2296 index = selector & ~7;
2297 if ((index + 7) > dt->limit)
2298 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2299 ptr = dt->base + index;
2300 e1 = ldl_kernel(ptr);
2301 e2 = ldl_kernel(ptr + 4);
2302
2303 if (!(e2 & DESC_S_MASK))
2304 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2305 rpl = selector & 3;
2306 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2307 if (seg_reg == R_SS) {
2308 /* must be writable segment */
2309 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2310 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2311 if (rpl != cpl || dpl != cpl)
2312 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2313 } else {
2314 /* must be readable segment */
2315 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2316 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2317
2318 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2319 /* if not conforming code, test rights */
2320 if (dpl < cpl || dpl < rpl)
2321 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2322 }
2323 }
2324
2325 if (!(e2 & DESC_P_MASK)) {
2326 if (seg_reg == R_SS)
2327 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2328 else
2329 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2330 }
2331
2332 /* set the access bit if not already set */
2333 if (!(e2 & DESC_A_MASK)) {
2334 e2 |= DESC_A_MASK;
2335 stl_kernel(ptr + 4, e2);
2336 }
2337
2338 cpu_x86_load_seg_cache(env, seg_reg, selector,
2339 get_seg_base(e1, e2),
2340 get_seg_limit(e1, e2),
2341 e2);
2342#if 0
2343    qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2344 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2345#endif
2346 }
2347}
2348
2349/* protected mode jump */
2350void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2351 int next_eip_addend)
2352{
2353 int gate_cs, type;
2354 uint32_t e1, e2, cpl, dpl, rpl, limit;
2355 target_ulong next_eip;
2356
2357 if ((new_cs & 0xfffc) == 0)
2358 raise_exception_err(EXCP0D_GPF, 0);
2359 if (load_segment(&e1, &e2, new_cs) != 0)
2360 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2361 cpl = env->hflags & HF_CPL_MASK;
2362 if (e2 & DESC_S_MASK) {
2363 if (!(e2 & DESC_CS_MASK))
2364 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2365 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2366 if (e2 & DESC_C_MASK) {
2367 /* conforming code segment */
2368 if (dpl > cpl)
2369 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2370 } else {
2371 /* non conforming code segment */
2372 rpl = new_cs & 3;
2373 if (rpl > cpl)
2374 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2375 if (dpl != cpl)
2376 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2377 }
2378 if (!(e2 & DESC_P_MASK))
2379 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2380 limit = get_seg_limit(e1, e2);
2381 if (new_eip > limit &&
2382 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2383 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2384 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2385 get_seg_base(e1, e2), limit, e2);
2386 EIP = new_eip;
2387 } else {
2388 /* jump to call or task gate */
2389 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2390 rpl = new_cs & 3;
2391 cpl = env->hflags & HF_CPL_MASK;
2392 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2393 switch(type) {
2394 case 1: /* 286 TSS */
2395 case 9: /* 386 TSS */
2396 case 5: /* task gate */
2397 if (dpl < cpl || dpl < rpl)
2398 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2399 next_eip = env->eip + next_eip_addend;
2400 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2401 CC_OP = CC_OP_EFLAGS;
2402 break;
2403 case 4: /* 286 call gate */
2404 case 12: /* 386 call gate */
2405 if ((dpl < cpl) || (dpl < rpl))
2406 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2407 if (!(e2 & DESC_P_MASK))
2408 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2409 gate_cs = e1 >> 16;
2410 new_eip = (e1 & 0xffff);
2411 if (type == 12)
2412 new_eip |= (e2 & 0xffff0000);
2413 if (load_segment(&e1, &e2, gate_cs) != 0)
2414 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2415 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2416 /* must be code segment */
2417 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2418 (DESC_S_MASK | DESC_CS_MASK)))
2419 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2420 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2421 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2422 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2423 if (!(e2 & DESC_P_MASK))
2424 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2425 limit = get_seg_limit(e1, e2);
2426 if (new_eip > limit)
2427 raise_exception_err(EXCP0D_GPF, 0);
2428 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2429 get_seg_base(e1, e2), limit, e2);
2430 EIP = new_eip;
2431 break;
2432 default:
2433 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2434 break;
2435 }
2436 }
2437}
2438
2439/* real mode call */
2440void helper_lcall_real(int new_cs, target_ulong new_eip1,
2441 int shift, int next_eip)
2442{
2443 int new_eip;
2444 uint32_t esp, esp_mask;
2445 target_ulong ssp;
2446
2447 new_eip = new_eip1;
2448 esp = ESP;
2449 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2450 ssp = env->segs[R_SS].base;
2451 if (shift) {
2452 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2453 PUSHL(ssp, esp, esp_mask, next_eip);
2454 } else {
2455 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2456 PUSHW(ssp, esp, esp_mask, next_eip);
2457 }
2458
2459 SET_ESP(esp, esp_mask);
2460 env->eip = new_eip;
2461 env->segs[R_CS].selector = new_cs;
2462 env->segs[R_CS].base = (new_cs << 4);
2463}
2464
2465/* protected mode call */
2466void helper_lcall_protected(int new_cs, target_ulong new_eip,
2467 int shift, int next_eip_addend)
2468{
2469 int new_stack, i;
2470 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2471    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2472 uint32_t val, limit, old_sp_mask;
2473 target_ulong ssp, old_ssp, next_eip;
2474
2475 next_eip = env->eip + next_eip_addend;
2476 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2477 LOG_PCALL_STATE(env);
2478 if ((new_cs & 0xfffc) == 0)
2479 raise_exception_err(EXCP0D_GPF, 0);
2480 if (load_segment(&e1, &e2, new_cs) != 0)
2481 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2482 cpl = env->hflags & HF_CPL_MASK;
2483    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2484 if (e2 & DESC_S_MASK) {
2485 if (!(e2 & DESC_CS_MASK))
2486 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2487 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2488 if (e2 & DESC_C_MASK) {
2489 /* conforming code segment */
2490 if (dpl > cpl)
2491 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2492 } else {
2493 /* non conforming code segment */
2494 rpl = new_cs & 3;
2495 if (rpl > cpl)
2496 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2497 if (dpl != cpl)
2498 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2499 }
2500 if (!(e2 & DESC_P_MASK))
2501 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2502
2503#ifdef TARGET_X86_64
2504 /* XXX: check 16/32 bit cases in long mode */
2505 if (shift == 2) {
2506 target_ulong rsp;
2507 /* 64 bit case */
2508 rsp = ESP;
2509 PUSHQ(rsp, env->segs[R_CS].selector);
2510 PUSHQ(rsp, next_eip);
2511 /* from this point, not restartable */
2512 ESP = rsp;
2513 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2514 get_seg_base(e1, e2),
2515 get_seg_limit(e1, e2), e2);
2516 EIP = new_eip;
2517 } else
2518#endif
2519 {
2520 sp = ESP;
2521 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2522 ssp = env->segs[R_SS].base;
2523 if (shift) {
2524 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2525 PUSHL(ssp, sp, sp_mask, next_eip);
2526 } else {
2527 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2528 PUSHW(ssp, sp, sp_mask, next_eip);
2529 }
2530
2531 limit = get_seg_limit(e1, e2);
2532 if (new_eip > limit)
2533 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2534 /* from this point, not restartable */
2535 SET_ESP(sp, sp_mask);
2536 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2537 get_seg_base(e1, e2), limit, e2);
2538 EIP = new_eip;
2539 }
2540 } else {
2541 /* check gate type */
2542 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2543 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2544 rpl = new_cs & 3;
2545 switch(type) {
2546 case 1: /* available 286 TSS */
2547 case 9: /* available 386 TSS */
2548 case 5: /* task gate */
2549 if (dpl < cpl || dpl < rpl)
2550 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2551 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2552 CC_OP = CC_OP_EFLAGS;
2553 return;
2554 case 4: /* 286 call gate */
2555 case 12: /* 386 call gate */
2556 break;
2557 default:
2558 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2559 break;
2560 }
2561 shift = type >> 3;
2562
2563 if (dpl < cpl || dpl < rpl)
2564 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2565 /* check valid bit */
2566 if (!(e2 & DESC_P_MASK))
2567 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2568 selector = e1 >> 16;
2569 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2570 param_count = e2 & 0x1f;
2571 if ((selector & 0xfffc) == 0)
2572 raise_exception_err(EXCP0D_GPF, 0);
2573
2574 if (load_segment(&e1, &e2, selector) != 0)
2575 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2576 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2577 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2578 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2579 if (dpl > cpl)
2580 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2581 if (!(e2 & DESC_P_MASK))
2582 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2583
2584 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2585 /* to inner privilege */
2586 get_ss_esp_from_tss(&ss, &sp, dpl);
2587            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2588                      ss, sp, param_count, ESP);
2589 if ((ss & 0xfffc) == 0)
2590 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2591 if ((ss & 3) != dpl)
2592 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2593 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2594 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2595 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2596 if (ss_dpl != dpl)
2597 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2598 if (!(ss_e2 & DESC_S_MASK) ||
2599 (ss_e2 & DESC_CS_MASK) ||
2600 !(ss_e2 & DESC_W_MASK))
2601 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2602 if (!(ss_e2 & DESC_P_MASK))
2603 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2604
2605 // push_size = ((param_count * 2) + 8) << shift;
2606
2607 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2608 old_ssp = env->segs[R_SS].base;
2609
2610 sp_mask = get_sp_mask(ss_e2);
2611 ssp = get_seg_base(ss_e1, ss_e2);
2612 if (shift) {
2613 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2614 PUSHL(ssp, sp, sp_mask, ESP);
2615 for(i = param_count - 1; i >= 0; i--) {
2616 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2617 PUSHL(ssp, sp, sp_mask, val);
2618 }
2619 } else {
2620 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2621 PUSHW(ssp, sp, sp_mask, ESP);
2622 for(i = param_count - 1; i >= 0; i--) {
2623 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2624 PUSHW(ssp, sp, sp_mask, val);
2625 }
2626 }
2627 new_stack = 1;
2628 } else {
2629 /* to same privilege */
2630 sp = ESP;
2631 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2632 ssp = env->segs[R_SS].base;
2633 // push_size = (4 << shift);
2634 new_stack = 0;
2635 }
2636
2637 if (shift) {
2638 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2639 PUSHL(ssp, sp, sp_mask, next_eip);
2640 } else {
2641 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2642 PUSHW(ssp, sp, sp_mask, next_eip);
2643 }
2644
2645 /* from this point, not restartable */
2646
2647 if (new_stack) {
2648 ss = (ss & ~3) | dpl;
2649 cpu_x86_load_seg_cache(env, R_SS, ss,
2650 ssp,
2651 get_seg_limit(ss_e1, ss_e2),
2652 ss_e2);
2653 }
2654
2655 selector = (selector & ~3) | dpl;
2656 cpu_x86_load_seg_cache(env, R_CS, selector,
2657 get_seg_base(e1, e2),
2658 get_seg_limit(e1, e2),
2659 e2);
2660 cpu_x86_set_cpl(env, dpl);
2661 SET_ESP(sp, sp_mask);
2662 EIP = offset;
2663 }
2664}
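/* For a call gate leading to a more privileged code segment, the new
   SS:ESP is fetched from the TSS of the target privilege level and
   param_count entries (words or dwords, per the gate type) are copied
   from the caller's stack to the new one before the return CS:EIP is
   pushed. */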
2665
2666/* real and vm86 mode iret */
2667void helper_iret_real(int shift)
2668{
2669 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2670 target_ulong ssp;
2671 int eflags_mask;
2672
2673 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2674 sp = ESP;
2675 ssp = env->segs[R_SS].base;
2676 if (shift == 1) {
2677 /* 32 bits */
2678 POPL(ssp, sp, sp_mask, new_eip);
2679 POPL(ssp, sp, sp_mask, new_cs);
2680 new_cs &= 0xffff;
2681 POPL(ssp, sp, sp_mask, new_eflags);
2682 } else {
2683 /* 16 bits */
2684 POPW(ssp, sp, sp_mask, new_eip);
2685 POPW(ssp, sp, sp_mask, new_cs);
2686 POPW(ssp, sp, sp_mask, new_eflags);
2687 }
2688 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2689    env->segs[R_CS].selector = new_cs;
2690    env->segs[R_CS].base = (new_cs << 4);
2691 env->eip = new_eip;
2692 if (env->eflags & VM_MASK)
2693 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2694 else
2695 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2696 if (shift == 0)
2697 eflags_mask &= 0xffff;
2698 load_eflags(new_eflags, eflags_mask);
2699    env->hflags2 &= ~HF2_NMI_MASK;
2700}
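/* This path also serves VM86 mode when IOPL permits IRET to execute
   directly; in that case IOPL itself is excluded from the writable
   flag mask, so a vm86 task cannot raise its own I/O privilege. */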
2701
2702static inline void validate_seg(int seg_reg, int cpl)
2703{
2704 int dpl;
2705 uint32_t e2;
2706
2707 /* XXX: on x86_64, we do not want to nullify FS and GS because
2708 they may still contain a valid base. I would be interested to
2709 know how a real x86_64 CPU behaves */
2710 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2711 (env->segs[seg_reg].selector & 0xfffc) == 0)
2712 return;
2713
2714 e2 = env->segs[seg_reg].flags;
2715 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2716 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2717 /* data or non conforming code segment */
2718 if (dpl < cpl) {
2719 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2720 }
2721 }
2722}
2723
2724/* protected mode iret */
2725static inline void helper_ret_protected(int shift, int is_iret, int addend)
2726{
2727 uint32_t new_cs, new_eflags, new_ss;
2728 uint32_t new_es, new_ds, new_fs, new_gs;
2729 uint32_t e1, e2, ss_e1, ss_e2;
2730 int cpl, dpl, rpl, eflags_mask, iopl;
2731 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2732
2733#ifdef TARGET_X86_64
2734 if (shift == 2)
2735 sp_mask = -1;
2736 else
2737#endif
2738 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2739 sp = ESP;
2740 ssp = env->segs[R_SS].base;
2741 new_eflags = 0; /* avoid warning */
2742#ifdef TARGET_X86_64
2743 if (shift == 2) {
2744 POPQ(sp, new_eip);
2745 POPQ(sp, new_cs);
2746 new_cs &= 0xffff;
2747 if (is_iret) {
2748 POPQ(sp, new_eflags);
2749 }
2750 } else
2751#endif
2752 if (shift == 1) {
2753 /* 32 bits */
2754 POPL(ssp, sp, sp_mask, new_eip);
2755 POPL(ssp, sp, sp_mask, new_cs);
2756 new_cs &= 0xffff;
2757 if (is_iret) {
2758 POPL(ssp, sp, sp_mask, new_eflags);
2759 if (new_eflags & VM_MASK)
2760 goto return_to_vm86;
2761 }
2762 } else {
2763 /* 16 bits */
2764 POPW(ssp, sp, sp_mask, new_eip);
2765 POPW(ssp, sp, sp_mask, new_cs);
2766 if (is_iret)
2767 POPW(ssp, sp, sp_mask, new_eflags);
2768 }
2769 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2770 new_cs, new_eip, shift, addend);
2771 LOG_PCALL_STATE(env);
2772 if ((new_cs & 0xfffc) == 0)
2773 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2774 if (load_segment(&e1, &e2, new_cs) != 0)
2775 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2776 if (!(e2 & DESC_S_MASK) ||
2777 !(e2 & DESC_CS_MASK))
2778 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2779 cpl = env->hflags & HF_CPL_MASK;
2780 rpl = new_cs & 3;
2781 if (rpl < cpl)
2782 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2783 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2784 if (e2 & DESC_C_MASK) {
2785 if (dpl > rpl)
2786 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2787 } else {
2788 if (dpl != rpl)
2789 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2790 }
2791 if (!(e2 & DESC_P_MASK))
2792 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2793
2794 sp += addend;
2795 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2796 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2797        /* return to same privilege level */
2798 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2799 get_seg_base(e1, e2),
2800 get_seg_limit(e1, e2),
2801 e2);
2802 } else {
2803 /* return to different privilege level */
2804#ifdef TARGET_X86_64
2805 if (shift == 2) {
2806 POPQ(sp, new_esp);
2807 POPQ(sp, new_ss);
2808 new_ss &= 0xffff;
2809 } else
2810#endif
2811 if (shift == 1) {
2812 /* 32 bits */
2813 POPL(ssp, sp, sp_mask, new_esp);
2814 POPL(ssp, sp, sp_mask, new_ss);
2815 new_ss &= 0xffff;
2816 } else {
2817 /* 16 bits */
2818 POPW(ssp, sp, sp_mask, new_esp);
2819 POPW(ssp, sp, sp_mask, new_ss);
2820 }
2821        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2822                  new_ss, new_esp);
2823 if ((new_ss & 0xfffc) == 0) {
2824#ifdef TARGET_X86_64
2825            /* NULL ss is allowed in long mode if cpl != 3 */
2826 /* XXX: test CS64 ? */
2827 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2828 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2829 0, 0xffffffff,
2830 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2831 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2832 DESC_W_MASK | DESC_A_MASK);
2833 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2834 } else
2835#endif
2836 {
2837 raise_exception_err(EXCP0D_GPF, 0);
2838 }
2839 } else {
2840 if ((new_ss & 3) != rpl)
2841 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2842 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2843 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2844 if (!(ss_e2 & DESC_S_MASK) ||
2845 (ss_e2 & DESC_CS_MASK) ||
2846 !(ss_e2 & DESC_W_MASK))
2847 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2848 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2849 if (dpl != rpl)
2850 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2851 if (!(ss_e2 & DESC_P_MASK))
2852 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2853 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2854 get_seg_base(ss_e1, ss_e2),
2855 get_seg_limit(ss_e1, ss_e2),
2856 ss_e2);
2857 }
2858
2859 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2860 get_seg_base(e1, e2),
2861 get_seg_limit(e1, e2),
2862 e2);
2863 cpu_x86_set_cpl(env, rpl);
2864 sp = new_esp;
2865#ifdef TARGET_X86_64
2866 if (env->hflags & HF_CS64_MASK)
2867 sp_mask = -1;
2868 else
2869#endif
2870 sp_mask = get_sp_mask(ss_e2);
2871
2872 /* validate data segments */
2873 validate_seg(R_ES, rpl);
2874 validate_seg(R_DS, rpl);
2875 validate_seg(R_FS, rpl);
2876 validate_seg(R_GS, rpl);
2877
2878 sp += addend;
2879 }
2880 SET_ESP(sp, sp_mask);
2881 env->eip = new_eip;
2882 if (is_iret) {
2883 /* NOTE: 'cpl' is the _old_ CPL */
2884 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2885 if (cpl == 0)
2886 eflags_mask |= IOPL_MASK;
2887 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2888 if (cpl <= iopl)
2889 eflags_mask |= IF_MASK;
2890 if (shift == 0)
2891 eflags_mask &= 0xffff;
2892 load_eflags(new_eflags, eflags_mask);
2893 }
2894 return;
2895
2896 return_to_vm86:
2897 POPL(ssp, sp, sp_mask, new_esp);
2898 POPL(ssp, sp, sp_mask, new_ss);
2899 POPL(ssp, sp, sp_mask, new_es);
2900 POPL(ssp, sp, sp_mask, new_ds);
2901 POPL(ssp, sp, sp_mask, new_fs);
2902 POPL(ssp, sp, sp_mask, new_gs);
2903
2904 /* modify processor state */
2905 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2906 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2907 load_seg_vm(R_CS, new_cs & 0xffff);
2908 cpu_x86_set_cpl(env, 3);
2909 load_seg_vm(R_SS, new_ss & 0xffff);
2910 load_seg_vm(R_ES, new_es & 0xffff);
2911 load_seg_vm(R_DS, new_ds & 0xffff);
2912 load_seg_vm(R_FS, new_fs & 0xffff);
2913 load_seg_vm(R_GS, new_gs & 0xffff);
2914
2915 env->eip = new_eip & 0xffff;
2916 ESP = new_esp;
2917}
2918
2919void helper_iret_protected(int shift, int next_eip)
2920{
2921 int tss_selector, type;
2922 uint32_t e1, e2;
2923
2924 /* specific case for TSS */
2925 if (env->eflags & NT_MASK) {
2926#ifdef TARGET_X86_64
2927 if (env->hflags & HF_LMA_MASK)
2928 raise_exception_err(EXCP0D_GPF, 0);
2929#endif
2930 tss_selector = lduw_kernel(env->tr.base + 0);
2931 if (tss_selector & 4)
2932 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2933 if (load_segment(&e1, &e2, tss_selector) != 0)
2934 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2935 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2936 /* NOTE: we check both segment and busy TSS */
2937 if (type != 3)
2938 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2939 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2940 } else {
2941 helper_ret_protected(shift, 1, 0);
2942 }
2943    env->hflags2 &= ~HF2_NMI_MASK;
2944}
2945
2946void helper_lret_protected(int shift, int addend)
2947{
2948 helper_ret_protected(shift, 0, addend);
2949}
2950
2951void helper_sysenter(void)
2952{
2953 if (env->sysenter_cs == 0) {
2954 raise_exception_err(EXCP0D_GPF, 0);
2955 }
2956 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2957 cpu_x86_set_cpl(env, 0);
2958
2959#ifdef TARGET_X86_64
2960 if (env->hflags & HF_LMA_MASK) {
2961 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2962 0, 0xffffffff,
2963 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2964 DESC_S_MASK |
2965 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2966 } else
2967#endif
2968 {
2969 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2970 0, 0xffffffff,
2971 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2972 DESC_S_MASK |
2973 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2974 }
2975 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2976 0, 0xffffffff,
2977 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2978 DESC_S_MASK |
2979 DESC_W_MASK | DESC_A_MASK);
2980 ESP = env->sysenter_esp;
2981 EIP = env->sysenter_eip;
2982}
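/* SYSENTER builds a flat segment layout from IA32_SYSENTER_CS: CS is
   the MSR value and SS is CS + 8, both with base 0 and a 4 GB limit.
   helper_sysexit below mirrors this with CS + 16 / SS + 24 for 32-bit
   returns (CS + 32 / SS + 40 in 64-bit mode), resuming at EDX with the
   stack at ECX. */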
2983
2984 void helper_sysexit(int dflag)
2985{
2986 int cpl;
2987
2988 cpl = env->hflags & HF_CPL_MASK;
2989 if (env->sysenter_cs == 0 || cpl != 0) {
2990 raise_exception_err(EXCP0D_GPF, 0);
2991 }
2992 cpu_x86_set_cpl(env, 3);
2993#ifdef TARGET_X86_64
2994 if (dflag == 2) {
2995 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2996 0, 0xffffffff,
2997 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2998 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2999 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
3000 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
3001 0, 0xffffffff,
3002 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3003 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3004 DESC_W_MASK | DESC_A_MASK);
3005 } else
3006#endif
3007 {
3008 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
3009 0, 0xffffffff,
3010 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3011 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3012 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
3013 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
3014 0, 0xffffffff,
3015 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
3016 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
3017 DESC_W_MASK | DESC_A_MASK);
3018 }
3019 ESP = ECX;
3020 EIP = EDX;
3021}
3022
3023#if defined(CONFIG_USER_ONLY)
3024target_ulong helper_read_crN(int reg)
3025 {
3026 return 0;
3027}
3028
3029void helper_write_crN(int reg, target_ulong t0)
3030{
3031}
3032
3033void helper_movl_drN_T0(int reg, target_ulong t0)
3034{
3035}
3036#else
3037target_ulong helper_read_crN(int reg)
3038{
3039 target_ulong val;
3040
3041 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
3042 switch(reg) {
3043 default:
3044 val = env->cr[reg];
3045 break;
3046 case 8:
3047        if (!(env->hflags2 & HF2_VINTR_MASK)) {
3048            val = cpu_get_apic_tpr(env->apic_state);
3049 } else {
3050 val = env->v_tpr;
3051 }
3052 break;
3053 }
3054 return val;
3055}
3056
3057void helper_write_crN(int reg, target_ulong t0)
3058{
3059 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
3060 switch(reg) {
3061 case 0:
3062 cpu_x86_update_cr0(env, t0);
3063 break;
3064 case 3:
3065 cpu_x86_update_cr3(env, t0);
3066 break;
3067 case 4:
3068 cpu_x86_update_cr4(env, t0);
3069 break;
3070 case 8:
3071        if (!(env->hflags2 & HF2_VINTR_MASK)) {
3072            cpu_set_apic_tpr(env->apic_state, t0);
3073 }
3074 env->v_tpr = t0 & 0x0f;
3075 break;
3076 default:
3077 env->cr[reg] = t0;
3078 break;
3079 }
3080}
3081
3082void helper_movl_drN_T0(int reg, target_ulong t0)
3083{
3084 int i;
3085
3086 if (reg < 4) {
3087 hw_breakpoint_remove(env, reg);
3088 env->dr[reg] = t0;
3089 hw_breakpoint_insert(env, reg);
3090 } else if (reg == 7) {
3091 for (i = 0; i < 4; i++)
3092 hw_breakpoint_remove(env, i);
3093 env->dr[7] = t0;
3094 for (i = 0; i < 4; i++)
3095 hw_breakpoint_insert(env, i);
3096 } else
3097 env->dr[reg] = t0;
3098}
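/* A write to DR0-DR3 moves a single hardware breakpoint; a write to
   DR7 re-inserts all four, since DR7 holds the enable bits and the
   type/length fields that control how each address is matched. */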
3099#endif
3100
3101void helper_lmsw(target_ulong t0)
3102{
3103 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3104 if already set to one. */
3105 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3106    helper_write_crN(0, t0);
3107}
3108
3109void helper_clts(void)
3110{
3111 env->cr[0] &= ~CR0_TS_MASK;
3112 env->hflags &= ~HF_TS_MASK;
3113}
3114
3115void helper_invlpg(target_ulong addr)
3116{
3117    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3118    tlb_flush_page(env, addr);
3119}
3120
3121void helper_rdtsc(void)
3122{
3123 uint64_t val;
3124
3125 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3126 raise_exception(EXCP0D_GPF);
3127 }
3128 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3129
3130    val = cpu_get_tsc(env) + env->tsc_offset;
3131 EAX = (uint32_t)(val);
3132 EDX = (uint32_t)(val >> 32);
3133}
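/* RDTSC returns the 64-bit time-stamp counter split across EDX:EAX.
   With CR4.TSD set it faults outside ring 0, and tsc_offset applies
   the guest TSC offset that SVM provides. */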
3134
3135void helper_rdtscp(void)
3136{
3137 helper_rdtsc();
3138 ECX = (uint32_t)(env->tsc_aux);
3139}
3140
3141void helper_rdpmc(void)
3142{
3143 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3144 raise_exception(EXCP0D_GPF);
3145 }
3146 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3147
3148 /* currently unimplemented */
3149    qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
3150 raise_exception_err(EXCP06_ILLOP, 0);
3151}
3152
3153#if defined(CONFIG_USER_ONLY)
3154void helper_wrmsr(void)
3155{
3156}
3157
3158void helper_rdmsr(void)
3159{
3160}
3161#else
3162void helper_wrmsr(void)
3163{
3164 uint64_t val;
3165
3166 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3167
3168 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3169
3170 switch((uint32_t)ECX) {
3171 case MSR_IA32_SYSENTER_CS:
3172 env->sysenter_cs = val & 0xffff;
3173 break;
3174 case MSR_IA32_SYSENTER_ESP:
3175 env->sysenter_esp = val;
3176 break;
3177 case MSR_IA32_SYSENTER_EIP:
3178 env->sysenter_eip = val;
3179 break;
3180 case MSR_IA32_APICBASE:
3181        cpu_set_apic_base(env->apic_state, val);
3182 break;
3183 case MSR_EFER:
3184 {
3185 uint64_t update_mask;
3186 update_mask = 0;
3187 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3188 update_mask |= MSR_EFER_SCE;
3189 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3190 update_mask |= MSR_EFER_LME;
3191 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3192 update_mask |= MSR_EFER_FFXSR;
3193 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3194 update_mask |= MSR_EFER_NXE;
3195 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3196 update_mask |= MSR_EFER_SVME;
3197 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3198 update_mask |= MSR_EFER_FFXSR;
3199 cpu_load_efer(env, (env->efer & ~update_mask) |
3200 (val & update_mask));
3201 }
3202 break;
3203 case MSR_STAR:
3204 env->star = val;
3205 break;
3206 case MSR_PAT:
3207 env->pat = val;
3208 break;
3209 case MSR_VM_HSAVE_PA:
3210 env->vm_hsave = val;
3211 break;
3212#ifdef TARGET_X86_64
3213 case MSR_LSTAR:
3214 env->lstar = val;
3215 break;
3216 case MSR_CSTAR:
3217 env->cstar = val;
3218 break;
3219 case MSR_FMASK:
3220 env->fmask = val;
3221 break;
3222 case MSR_FSBASE:
3223 env->segs[R_FS].base = val;
3224 break;
3225 case MSR_GSBASE:
3226 env->segs[R_GS].base = val;
3227 break;
3228 case MSR_KERNELGSBASE:
3229 env->kernelgsbase = val;
3230 break;
3231#endif
3232 case MSR_MTRRphysBase(0):
3233 case MSR_MTRRphysBase(1):
3234 case MSR_MTRRphysBase(2):
3235 case MSR_MTRRphysBase(3):
3236 case MSR_MTRRphysBase(4):
3237 case MSR_MTRRphysBase(5):
3238 case MSR_MTRRphysBase(6):
3239 case MSR_MTRRphysBase(7):
3240 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3241 break;
3242 case MSR_MTRRphysMask(0):
3243 case MSR_MTRRphysMask(1):
3244 case MSR_MTRRphysMask(2):
3245 case MSR_MTRRphysMask(3):
3246 case MSR_MTRRphysMask(4):
3247 case MSR_MTRRphysMask(5):
3248 case MSR_MTRRphysMask(6):
3249 case MSR_MTRRphysMask(7):
3250 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3251 break;
3252 case MSR_MTRRfix64K_00000:
3253 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3254 break;
3255 case MSR_MTRRfix16K_80000:
3256 case MSR_MTRRfix16K_A0000:
3257 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3258 break;
3259 case MSR_MTRRfix4K_C0000:
3260 case MSR_MTRRfix4K_C8000:
3261 case MSR_MTRRfix4K_D0000:
3262 case MSR_MTRRfix4K_D8000:
3263 case MSR_MTRRfix4K_E0000:
3264 case MSR_MTRRfix4K_E8000:
3265 case MSR_MTRRfix4K_F0000:
3266 case MSR_MTRRfix4K_F8000:
3267 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3268 break;
3269 case MSR_MTRRdefType:
3270 env->mtrr_deftype = val;
3271 break;
3272 case MSR_MCG_STATUS:
3273 env->mcg_status = val;
3274 break;
3275 case MSR_MCG_CTL:
3276 if ((env->mcg_cap & MCG_CTL_P)
3277 && (val == 0 || val == ~(uint64_t)0))
3278 env->mcg_ctl = val;
3279 break;
3280 case MSR_TSC_AUX:
3281 env->tsc_aux = val;
3282 break;
3283 case MSR_IA32_MISC_ENABLE:
3284 env->msr_ia32_misc_enable = val;
3285 break;
3286    default:
3287 if ((uint32_t)ECX >= MSR_MC0_CTL
3288            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3289 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3290 if ((offset & 0x3) != 0
3291 || (val == 0 || val == ~(uint64_t)0))
3292 env->mce_banks[offset] = val;
3293 break;
3294 }
3295 /* XXX: exception ? */
3296 break;
3297 }
3298}
3299
3300void helper_rdmsr(void)
3301{
3302 uint64_t val;
3303
3304 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3305
3306 switch((uint32_t)ECX) {
3307 case MSR_IA32_SYSENTER_CS:
3308 val = env->sysenter_cs;
3309 break;
3310 case MSR_IA32_SYSENTER_ESP:
3311 val = env->sysenter_esp;
3312 break;
3313 case MSR_IA32_SYSENTER_EIP:
3314 val = env->sysenter_eip;
3315 break;
3316 case MSR_IA32_APICBASE:
3317        val = cpu_get_apic_base(env->apic_state);
3318 break;
3319 case MSR_EFER:
3320 val = env->efer;
3321 break;
3322 case MSR_STAR:
3323 val = env->star;
3324 break;
3325 case MSR_PAT:
3326 val = env->pat;
3327 break;
3328 case MSR_VM_HSAVE_PA:
3329 val = env->vm_hsave;
3330 break;
3331 case MSR_IA32_PERF_STATUS:
3332 /* tsc_increment_by_tick */
3333 val = 1000ULL;
3334 /* CPU multiplier */
3335 val |= (((uint64_t)4ULL) << 40);
3336 break;
3337#ifdef TARGET_X86_64
3338 case MSR_LSTAR:
3339 val = env->lstar;
3340 break;
3341 case MSR_CSTAR:
3342 val = env->cstar;
3343 break;
3344 case MSR_FMASK:
3345 val = env->fmask;
3346 break;
3347 case MSR_FSBASE:
3348 val = env->segs[R_FS].base;
3349 break;
3350 case MSR_GSBASE:
3351 val = env->segs[R_GS].base;
3352 break;
3353 case MSR_KERNELGSBASE:
3354 val = env->kernelgsbase;
3355 break;
3356 case MSR_TSC_AUX:
3357 val = env->tsc_aux;
3358 break;
3359#endif
3360 case MSR_MTRRphysBase(0):
3361 case MSR_MTRRphysBase(1):
3362 case MSR_MTRRphysBase(2):
3363 case MSR_MTRRphysBase(3):
3364 case MSR_MTRRphysBase(4):
3365 case MSR_MTRRphysBase(5):
3366 case MSR_MTRRphysBase(6):
3367 case MSR_MTRRphysBase(7):
3368 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3369 break;
3370 case MSR_MTRRphysMask(0):
3371 case MSR_MTRRphysMask(1):
3372 case MSR_MTRRphysMask(2):
3373 case MSR_MTRRphysMask(3):
3374 case MSR_MTRRphysMask(4):
3375 case MSR_MTRRphysMask(5):
3376 case MSR_MTRRphysMask(6):
3377 case MSR_MTRRphysMask(7):
3378 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3379 break;
3380 case MSR_MTRRfix64K_00000:
3381 val = env->mtrr_fixed[0];
3382 break;
3383 case MSR_MTRRfix16K_80000:
3384 case MSR_MTRRfix16K_A0000:
3385 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3386 break;
3387 case MSR_MTRRfix4K_C0000:
3388 case MSR_MTRRfix4K_C8000:
3389 case MSR_MTRRfix4K_D0000:
3390 case MSR_MTRRfix4K_D8000:
3391 case MSR_MTRRfix4K_E0000:
3392 case MSR_MTRRfix4K_E8000:
3393 case MSR_MTRRfix4K_F0000:
3394 case MSR_MTRRfix4K_F8000:
3395 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3396 break;
3397 case MSR_MTRRdefType:
3398 val = env->mtrr_deftype;
3399 break;
3400 case MSR_MTRRcap:
3401 if (env->cpuid_features & CPUID_MTRR)
3402 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3403 else
3404 /* XXX: exception ? */
3405 val = 0;
3406 break;
3407 case MSR_MCG_CAP:
3408 val = env->mcg_cap;
3409 break;
3410 case MSR_MCG_CTL:
3411 if (env->mcg_cap & MCG_CTL_P)
3412 val = env->mcg_ctl;
3413 else
3414 val = 0;
3415 break;
3416 case MSR_MCG_STATUS:
3417 val = env->mcg_status;
3418 break;
3419 case MSR_IA32_MISC_ENABLE:
3420 val = env->msr_ia32_misc_enable;
3421 break;
3422    default:
3423 if ((uint32_t)ECX >= MSR_MC0_CTL
3424            && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3425 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3426 val = env->mce_banks[offset];
3427 break;
3428 }
3429 /* XXX: exception ? */
3430 val = 0;
3431 break;
3432 }
3433 EAX = (uint32_t)(val);
3434 EDX = (uint32_t)(val >> 32);
3435}
3436#endif
3437
3438target_ulong helper_lsl(target_ulong selector1)
3439{
3440 unsigned int limit;
3441 uint32_t e1, e2, eflags, selector;
3442 int rpl, dpl, cpl, type;
3443
3444 selector = selector1 & 0xffff;
3445    eflags = helper_cc_compute_all(CC_OP);
3446 if ((selector & 0xfffc) == 0)
3447 goto fail;
3448 if (load_segment(&e1, &e2, selector) != 0)
3449 goto fail;
3450 rpl = selector & 3;
3451 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3452 cpl = env->hflags & HF_CPL_MASK;
3453 if (e2 & DESC_S_MASK) {
3454 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3455 /* conforming */
3456 } else {
3457 if (dpl < cpl || dpl < rpl)
3458 goto fail;
3459 }
3460 } else {
3461 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3462 switch(type) {
3463 case 1:
3464 case 2:
3465 case 3:
3466 case 9:
3467 case 11:
3468 break;
3469 default:
3470 goto fail;
3471 }
3472 if (dpl < cpl || dpl < rpl) {
3473 fail:
3474 CC_SRC = eflags & ~CC_Z;
3475 return 0;
3476 }
3477 }
3478 limit = get_seg_limit(e1, e2);
3479 CC_SRC = eflags | CC_Z;
3480 return limit;
3481}
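/* LSL (and LAR below) never fault on a bad selector: if the descriptor
   is not visible at the current CPL/RPL, ZF is cleared and 0 returned;
   otherwise ZF is set and the expanded limit (or, for LAR, the access
   rights masked to 0x00f0ff00) is returned. */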
3482
3483target_ulong helper_lar(target_ulong selector1)
3484{
3485 uint32_t e1, e2, eflags, selector;
3486 int rpl, dpl, cpl, type;
3487
3488 selector = selector1 & 0xffff;
3489    eflags = helper_cc_compute_all(CC_OP);
3490 if ((selector & 0xfffc) == 0)
3491 goto fail;
3492 if (load_segment(&e1, &e2, selector) != 0)
3493 goto fail;
3494 rpl = selector & 3;
3495 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3496 cpl = env->hflags & HF_CPL_MASK;
3497 if (e2 & DESC_S_MASK) {
3498 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3499 /* conforming */
3500 } else {
3501 if (dpl < cpl || dpl < rpl)
3502 goto fail;
3503 }
3504 } else {
3505 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3506 switch(type) {
3507 case 1:
3508 case 2:
3509 case 3:
3510 case 4:
3511 case 5:
3512 case 9:
3513 case 11:
3514 case 12:
3515 break;
3516 default:
3517 goto fail;
3518 }
3519 if (dpl < cpl || dpl < rpl) {
3520 fail:
3521 CC_SRC = eflags & ~CC_Z;
3522 return 0;
3523 }
3524 }
3525 CC_SRC = eflags | CC_Z;
3526 return e2 & 0x00f0ff00;
3527}
3528
3529void helper_verr(target_ulong selector1)
3530{
3531 uint32_t e1, e2, eflags, selector;
3532 int rpl, dpl, cpl;
3533
3534 selector = selector1 & 0xffff;
3535    eflags = helper_cc_compute_all(CC_OP);
3536 if ((selector & 0xfffc) == 0)
3537 goto fail;
3538 if (load_segment(&e1, &e2, selector) != 0)
3539 goto fail;
3540 if (!(e2 & DESC_S_MASK))
3541 goto fail;
3542 rpl = selector & 3;
3543 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3544 cpl = env->hflags & HF_CPL_MASK;
3545 if (e2 & DESC_CS_MASK) {
3546 if (!(e2 & DESC_R_MASK))
3547 goto fail;
3548 if (!(e2 & DESC_C_MASK)) {
3549 if (dpl < cpl || dpl < rpl)
3550 goto fail;
3551 }
3552 } else {
3553 if (dpl < cpl || dpl < rpl) {
3554 fail:
3555 CC_SRC = eflags & ~CC_Z;
3556 return;
3557 }
3558 }
3559 CC_SRC = eflags | CC_Z;
3560}
3561
3562void helper_verw(target_ulong selector1)
3563{
3564 uint32_t e1, e2, eflags, selector;
3565 int rpl, dpl, cpl;
3566
3567 selector = selector1 & 0xffff;
3568    eflags = helper_cc_compute_all(CC_OP);
3569 if ((selector & 0xfffc) == 0)
3570 goto fail;
3571 if (load_segment(&e1, &e2, selector) != 0)
3572 goto fail;
3573 if (!(e2 & DESC_S_MASK))
3574 goto fail;
3575 rpl = selector & 3;
3576 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3577 cpl = env->hflags & HF_CPL_MASK;
3578 if (e2 & DESC_CS_MASK) {
3579 goto fail;
3580 } else {
3581 if (dpl < cpl || dpl < rpl)
3582 goto fail;
3583 if (!(e2 & DESC_W_MASK)) {
3584 fail:
3585 CC_SRC = eflags & ~CC_Z;
3586 return;
3587 }
3588 }
3589 CC_SRC = eflags | CC_Z;
3590}
3591
3592/* x87 FPU helpers */
3593
3594 static inline double floatx80_to_double(floatx80 a)
3595{
3596 union {
3597 float64 f64;
3598 double d;
3599 } u;
3600
3601    u.f64 = floatx80_to_float64(a, &env->fp_status);
3602 return u.d;
3603}
3604
3605 static inline floatx80 double_to_floatx80(double a)
3606{
3607 union {
3608 float64 f64;
3609 double d;
3610 } u;
3611
3612 u.d = a;
3613    return float64_to_floatx80(u.f64, &env->fp_status);
3614}
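/* Conversions to and from the host double go through float64, so the
   64-bit extended-precision significand is rounded to 53 bits; this
   precision loss is accepted for helpers that compute via the host
   math library. */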
3615
3616static void fpu_set_exception(int mask)
3617{
3618 env->fpus |= mask;
3619 if (env->fpus & (~env->fpuc & FPUC_EM))
3620 env->fpus |= FPUS_SE | FPUS_B;
3621}
3622
3623 static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
3624 {
3625    if (floatx80_is_zero(b)) {
3626        fpu_set_exception(FPUS_ZE);
3627    }
3628    return floatx80_div(a, b, &env->fp_status);
3629}
3630
3631 static void fpu_raise_exception(void)
3632{
3633 if (env->cr[0] & CR0_NE_MASK) {
3634 raise_exception(EXCP10_COPR);
3635 }
3636#if !defined(CONFIG_USER_ONLY)
3637 else {
3638 cpu_set_ferr(env);
3639 }
3640#endif
3641}
3642
3643void helper_flds_FT0(uint32_t val)
3644{
3645 union {
3646 float32 f;
3647 uint32_t i;
3648 } u;
3649 u.i = val;
3650    FT0 = float32_to_floatx80(u.f, &env->fp_status);
3651}
3652
3653void helper_fldl_FT0(uint64_t val)
3654{
3655 union {
3656 float64 f;
3657 uint64_t i;
3658 } u;
3659 u.i = val;
3660    FT0 = float64_to_floatx80(u.f, &env->fp_status);
3661}
3662
3663void helper_fildl_FT0(int32_t val)
3664{
3665    FT0 = int32_to_floatx80(val, &env->fp_status);
3666}
3667
3668void helper_flds_ST0(uint32_t val)
3669{
3670 int new_fpstt;
3671 union {
3672 float32 f;
3673 uint32_t i;
3674 } u;
3675 new_fpstt = (env->fpstt - 1) & 7;
3676 u.i = val;
3677    env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
3678 env->fpstt = new_fpstt;
3679 env->fptags[new_fpstt] = 0; /* validate stack entry */
3680}
3681
3682void helper_fldl_ST0(uint64_t val)
3683{
3684 int new_fpstt;
3685 union {
3686 float64 f;
3687 uint64_t i;
3688 } u;
3689 new_fpstt = (env->fpstt - 1) & 7;
3690 u.i = val;
3691    env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
3692 env->fpstt = new_fpstt;
3693 env->fptags[new_fpstt] = 0; /* validate stack entry */
3694}
3695
3696void helper_fildl_ST0(int32_t val)
3697{
3698 int new_fpstt;
3699 new_fpstt = (env->fpstt - 1) & 7;
3700    env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
3701 env->fpstt = new_fpstt;
3702 env->fptags[new_fpstt] = 0; /* validate stack entry */
3703}
3704
3705void helper_fildll_ST0(int64_t val)
3706{
3707 int new_fpstt;
3708 new_fpstt = (env->fpstt - 1) & 7;
3709    env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
3710 env->fpstt = new_fpstt;
3711 env->fptags[new_fpstt] = 0; /* validate stack entry */
3712}
3713
3714uint32_t helper_fsts_ST0(void)
3715{
3716 union {
3717 float32 f;
3718 uint32_t i;
3719 } u;
3720    u.f = floatx80_to_float32(ST0, &env->fp_status);
3721 return u.i;
3722}
3723
3724uint64_t helper_fstl_ST0(void)
3725{
3726 union {
3727 float64 f;
3728 uint64_t i;
3729 } u;
3730    u.f = floatx80_to_float64(ST0, &env->fp_status);
3731 return u.i;
3732}
3733
3734int32_t helper_fist_ST0(void)
3735{
3736 int32_t val;
3737    val = floatx80_to_int32(ST0, &env->fp_status);
3738 if (val != (int16_t)val)
3739 val = -32768;
3740 return val;
3741}
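/* Out-of-range values make the 16-bit form return the integer
   indefinite 0x8000 (-32768), matching hardware FIST m16int
   behaviour. */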
3742
3743int32_t helper_fistl_ST0(void)
3744{
3745 int32_t val;
3746    val = floatx80_to_int32(ST0, &env->fp_status);
3747 return val;
3748}
3749
3750int64_t helper_fistll_ST0(void)
3751{
3752 int64_t val;
3753    val = floatx80_to_int64(ST0, &env->fp_status);
3754 return val;
3755}
3756
3757int32_t helper_fistt_ST0(void)
3758{
3759 int32_t val;
3760    val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
3761 if (val != (int16_t)val)
3762 val = -32768;
3763 return val;
3764}
3765
3766int32_t helper_fisttl_ST0(void)
3767{
3768 int32_t val;
c31da136 3769 val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
eaa728ee
FB
3770 return val;
3771}
3772
3773int64_t helper_fisttll_ST0(void)
3774{
3775 int64_t val;
3776    val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
3777 return val;
3778}
3779
3780void helper_fldt_ST0(target_ulong ptr)
3781{
3782 int new_fpstt;
3783 new_fpstt = (env->fpstt - 1) & 7;
3784 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3785 env->fpstt = new_fpstt;
3786 env->fptags[new_fpstt] = 0; /* validate stack entry */
3787}
3788
3789void helper_fstt_ST0(target_ulong ptr)
3790{
3791 helper_fstt(ST0, ptr);
3792}
3793
3794void helper_fpush(void)
3795{
3796 fpush();
3797}
3798
3799void helper_fpop(void)
3800{
3801 fpop();
3802}
3803
3804void helper_fdecstp(void)
3805{
3806 env->fpstt = (env->fpstt - 1) & 7;
3807 env->fpus &= (~0x4700);
3808}
3809
3810void helper_fincstp(void)
3811{
3812 env->fpstt = (env->fpstt + 1) & 7;
3813 env->fpus &= (~0x4700);
3814}
3815
3816/* FPU move */
3817
3818void helper_ffree_STN(int st_index)
3819{
3820 env->fptags[(env->fpstt + st_index) & 7] = 1;
3821}
3822
3823void helper_fmov_ST0_FT0(void)
3824{
3825 ST0 = FT0;
3826}
3827
3828void helper_fmov_FT0_STN(int st_index)
3829{
3830 FT0 = ST(st_index);
3831}
3832
3833void helper_fmov_ST0_STN(int st_index)
3834{
3835 ST0 = ST(st_index);
3836}
3837
3838void helper_fmov_STN_ST0(int st_index)
3839{
3840 ST(st_index) = ST0;
3841}
3842
3843void helper_fxchg_ST0_STN(int st_index)
3844{
3845    floatx80 tmp;
3846 tmp = ST(st_index);
3847 ST(st_index) = ST0;
3848 ST0 = tmp;
3849}
3850
3851/* FPU operations */
3852
3853static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
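/* floatx80_compare() returns -1/0/1/2 for less/equal/greater/unordered;
   indexing with ret + 1 maps these to the C0, C3, none and C3|C2|C0
   condition-code patterns defined for FCOM. */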
3854
3855void helper_fcom_ST0_FT0(void)
3856{
3857 int ret;
3858
3859    ret = floatx80_compare(ST0, FT0, &env->fp_status);
3860    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3861}
3862
3863void helper_fucom_ST0_FT0(void)
3864{
3865 int ret;
3866
3867    ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
3868    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3869}
3870
3871static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3872
3873void helper_fcomi_ST0_FT0(void)
3874{
3875 int eflags;
3876 int ret;
3877
3878    ret = floatx80_compare(ST0, FT0, &env->fp_status);
3879    eflags = helper_cc_compute_all(CC_OP);
3880 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3881 CC_SRC = eflags;
eaa728ee
FB
3882}
3883
3884void helper_fucomi_ST0_FT0(void)
3885{
3886 int eflags;
3887 int ret;
3888
c31da136 3889 ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
a7812ae4 3890 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
3891 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3892 CC_SRC = eflags;
eaa728ee
FB
3893}
3894
void helper_fadd_ST0_FT0(void)
{
    ST0 = floatx80_add(ST0, FT0, &env->fp_status);
}

void helper_fmul_ST0_FT0(void)
{
    ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
}

void helper_fsub_ST0_FT0(void)
{
    ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
}

void helper_fsubr_ST0_FT0(void)
{
    ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
}

void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}

/* fp operations between STN and ST0 */

void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
}

void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
}

void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
}

void helper_fsubr_STN_ST0(int st_index)
{
    ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
}

void helper_fdiv_STN_ST0(int st_index)
{
    floatx80 *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

void helper_fdivr_STN_ST0(int st_index)
{
    floatx80 *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}

/* misc FPU operations */
void helper_fchs_ST0(void)
{
    ST0 = floatx80_chs(ST0);
}

void helper_fabs_ST0(void)
{
    ST0 = floatx80_abs(ST0);
}

void helper_fld1_ST0(void)
{
    ST0 = floatx80_one;
}

void helper_fldl2t_ST0(void)
{
    ST0 = floatx80_l2t;
}

void helper_fldl2e_ST0(void)
{
    ST0 = floatx80_l2e;
}

void helper_fldpi_ST0(void)
{
    ST0 = floatx80_pi;
}

void helper_fldlg2_ST0(void)
{
    ST0 = floatx80_lg2;
}

void helper_fldln2_ST0(void)
{
    ST0 = floatx80_ln2;
}

void helper_fldz_ST0(void)
{
    ST0 = floatx80_zero;
}

void helper_fldz_FT0(void)
{
    FT0 = floatx80_zero;
}

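/* FNSTSW returns the status word with the current top-of-stack index
   merged into the TOP field (bits 13..11, mask 0x3800). */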
uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}

static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch (env->fpuc & FPU_RC_MASK) {
    default:
    case FPU_RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case FPU_RC_DOWN:
        rnd_type = float_round_down;
        break;
    case FPU_RC_UP:
        rnd_type = float_round_up;
        break;
    case FPU_RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
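    /* Bits 9..8 of the control word select the precision: 00 = single
       (32 bit), 10 = double (64 bit), 11 = extended (80 bit); the
       reserved value 01 is treated as extended here. */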
    switch ((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
}

void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

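/* FNINIT: control word 0x37f selects round-to-nearest, extended precision
   and masks all exceptions; every tag is set to 1 (empty). */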
void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* BCD ops */

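/* x86 packed BCD is ten bytes long: bytes 0..8 hold 18 decimal digits,
   two per byte, and bit 7 of byte 9 carries the sign. */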
void helper_fbld_ST0(target_ulong ptr)
{
    floatx80 tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for (i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = int64_to_floatx80(val, &env->fp_status);
    if (ldub(ptr + 9) & 0x80) {
        tmp = floatx80_chs(tmp);
    }
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx80_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}

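/* The transcendental helpers below round ST0 to host double precision and
   call into libm, so their results are less accurate than the 80-bit
   extended precision of real hardware. */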
4140void helper_f2xm1(void)
4141{
c31da136 4142 double val = floatx80_to_double(ST0);
a2c9ed3c 4143 val = pow(2.0, val) - 1.0;
c31da136 4144 ST0 = double_to_floatx80(val);
eaa728ee
FB
4145}
4146
4147void helper_fyl2x(void)
4148{
c31da136 4149 double fptemp = floatx80_to_double(ST0);
eaa728ee 4150
eaa728ee 4151 if (fptemp>0.0){
a2c9ed3c 4152 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
c31da136
AJ
4153 fptemp *= floatx80_to_double(ST1);
4154 ST1 = double_to_floatx80(fptemp);
eaa728ee
FB
4155 fpop();
4156 } else {
4157 env->fpus &= (~0x4700);
4158 env->fpus |= 0x400;
4159 }
4160}
4161
void helper_fptan(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        fptemp = tan(fptemp);
        ST0 = double_to_floatx80(fptemp);
        fpush();
        ST0 = floatx80_one;
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    double fptemp, fpsrcop;

    fpsrcop = floatx80_to_double(ST1);
    fptemp = floatx80_to_double(ST0);
    ST1 = double_to_floatx80(atan2(fpsrcop, fptemp));
    fpop();
}

void helper_fxtract(void)
{
    CPU_LDoubleU temp;

    temp.d = ST0;

    if (floatx80_is_zero(ST0)) {
        /* Easy way to generate -inf and raise a division-by-zero exception */
        ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero,
                           &env->fp_status);
        fpush();
        ST0 = temp.d;
    } else {
        int expdif;

        expdif = EXPD(temp) - EXPBIAS;
        /* DP exponent bias */
        ST0 = int32_to_floatx80(expdif, &env->fp_status);
        fpush();
        BIASEXPONENT(temp);
        ST0 = temp.d;
    }
}

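/* FPREM/FPREM1 compute a partial remainder: when the exponents differ by
   53 or more, only a partial reduction is performed and C2 is set so the
   instruction can be re-executed; otherwise the three low quotient bits
   are reported in C0, C3 and C1. */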
void helper_fprem1(void)
{
    double st0, st1, dblq, fpsrcop, fptemp;
    CPU_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    st0 = floatx80_to_double(ST0);
    st1 = floatx80_to_double(ST1);

    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
        ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = st0;
    fptemp = st1;
    fpsrcop1.d = ST0;
    fptemp1.d = ST1;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        st0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (st0 / st1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        st0 -= (st1 * fpsrcop * fptemp);
    }
    ST0 = double_to_floatx80(st0);
}

void helper_fprem(void)
{
    double st0, st1, dblq, fpsrcop, fptemp;
    CPU_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    st0 = floatx80_to_double(ST0);
    st1 = floatx80_to_double(ST1);

    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
        ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = st0;
    fptemp = st1;
    fpsrcop1.d = ST0;
    fptemp1.d = ST1;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        st0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (st0 / st1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        st0 -= (st1 * fpsrcop * fptemp);
    }
    ST0 = double_to_floatx80(st0);
}

void helper_fyl2xp1(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
        fptemp *= floatx80_to_double(ST1);
        ST1 = double_to_floatx80(fptemp);
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    if (floatx80_is_neg(ST0)) {
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = floatx80_sqrt(ST0, &env->fp_status);
}

void helper_fsincos(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_floatx80(sin(fptemp));
        fpush();
        ST0 = double_to_floatx80(cos(fptemp));
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    ST0 = floatx80_round_to_int(ST0, &env->fp_status);
}

void helper_fscale(void)
{
    if (floatx80_is_any_nan(ST1)) {
        ST0 = ST1;
    } else {
        int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
        ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
    }
}

void helper_fsin(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_floatx80(sin(fptemp));
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    double fptemp = floatx80_to_double(ST0);

    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = double_to_floatx80(cos(fptemp));
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

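/* FXAM condition-code encodings used below: 0x500 = infinity,
   0x100 = NaN, 0x4000 = zero, 0x4400 = denormal, 0x400 = normal finite;
   C1 (0x200) reflects the sign of ST0. */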
void helper_fxam_ST0(void)
{
    CPU_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0x8000000000000000ULL)
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}

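/* The x87 tag word encodes two bits per register: 0 = valid, 1 = zero,
   2 = special (NaN, infinity or denormal), 3 = empty. */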
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
                       || (mant & (1LL << 63)) == 0) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    floatx80 tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    floatx80 tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}
#endif

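/* FXSAVE/FXRSTOR use the abridged tag word: a single bit per register
   (1 = valid, 0 = empty), hence the XOR with 0xff when converting from
   QEMU's fptags[] (where 1 means empty). */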
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    floatx80 tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel */
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
            || (env->hflags & HF_CPL_MASK)
            || !(env->hflags & HF_LMA_MASK)) {
            for (i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    floatx80 tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRSTOR leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
            || (env->hflags & HF_CPL_MASK)
            || !(env->hflags & HF_LMA_MASK)) {
            for (i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
{
    CPU_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

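/* div64() divides the 128-bit value {*phigh, *plow} by b with a plain
   shift-and-subtract loop, one quotient bit per iteration. */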
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for (i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

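/* The multiply helpers keep the condition codes lazy: CC_DST gets the low
   half of the result and CC_SRC the data needed for CF/OF (the high half
   for MUL, an overflow indicator for IMUL). */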
void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit(env);
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit(env);
}

void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUX86State *env1, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    TranslationBlock *tb;
    int ret;
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            tb = tb_find_pc(retaddr);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, retaddr);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void svm_check_intercept(CPUX86State *env1, uint32_t type)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

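/* The VMCB stores segment attributes in a packed 12-bit form; the helpers
   below translate between that encoding and QEMU's descriptor flags. */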
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUX86State *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

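/* VMRUN: save the host context to the hsave page, load the guest context
   and intercept bitmaps from the VMCB at rAX, then inject any pending
   event from event_inj before resuming the guest. */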
void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct ? */
            do_interrupt_all(vector, 0, 0, 0, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(env);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

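/* MSR and I/O intercepts consult permission bitmaps referenced from the
   VMCB: the MSR map holds two bits per MSR (read and write), the I/O map
   one bit per port, widened here by the size-dependent mask. */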
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

void svm_check_intercept(CPUX86State *env1, uint32_t type)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;
    helper_svm_check_intercept_param(type, 0);
    env = saved_env;
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                  EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */

#define SSE_DAZ 0x0040
#define SSE_RC_MASK 0x6000
#define SSE_RC_NEAR 0x0000
#define SSE_RC_DOWN 0x2000
#define SSE_RC_UP 0x4000
#define SSE_RC_CHOP 0x6000
#define SSE_FZ 0x8000

static void update_sse_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch (env->mxcsr & SSE_RC_MASK) {
    default:
    case SSE_RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case SSE_RC_DOWN:
        rnd_type = float_round_down;
        break;
    case SSE_RC_UP:
        rnd_type = float_round_up;
        break;
    case SSE_RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->sse_status);

    /* set denormals are zero */
    set_flush_inputs_to_zero((env->mxcsr & SSE_DAZ) ? 1 : 0, &env->sse_status);

    /* set flush to zero; MXCSR only governs the SSE unit, not the x87
       status, so this must target sse_status */
    set_flush_to_zero((env->mxcsr & SSE_FZ) ? 1 : 0, &env->sse_status);
}

void helper_ldmxcsr(uint32_t val)
{
    env->mxcsr = val;
    update_sse_status();
}

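/* env->fptags is an array of 8 one-byte flags, so the two 32-bit stores
   below mark all eight registers at once (0 = valid for MMX use,
   1 = empty). */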
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}

#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
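/* Note: the loop below only terminates for t0 != 0; callers are expected
   to handle the zero case (BSF leaves the destination undefined and sets
   ZF) before invoking this helper. */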
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_lzcnt(target_ulong t0, int wordsize)
{
    int count;
    target_ulong res, mask;

    if (wordsize > 0 && t0 == 0) {
        return wordsize;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    if (wordsize > 0) {
        return wordsize - 1 - count;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}

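/* Lazy EFLAGS evaluation: CC_OP records which operation last changed the
   flags, so the flag value is only materialized on demand by the
   compute_* functions below.  CC_OP_EFLAGS means CC_SRC already holds
   the flags verbatim. */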
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}

/* External entry point: temporarily install env1 as the global env so
   that the TCG helper above can be used with an arbitrary CPU state.  */
uint32_t cpu_cc_compute_all(CPUX86State *env1, int op)
{
    CPUX86State *saved_env;
    uint32_t ret;

    saved_env = env;
    env = env1;
    ret = helper_cc_compute_all(op);
    env = saved_env;
    return ret;
}
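
/* Usage sketch, for illustration only: code holding a CPUX86State
   pointer outside translated code (where the global env is not valid)
   could rebuild the flags with something like:

       uint32_t flags = cpu_cc_compute_all(env1, env1->cc_op);
*/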
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    /* the MUL carry (CC_SRC != 0) does not depend on the operand
       width, so all widths share the 32-bit variant */
    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    /* INC/DEC leave CF untouched; compute_c_incl() just returns the
       carry saved in CC_SRC, so all widths share it */
    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    /* for SAR the carry is the last bit shifted out (CC_SRC & 1),
       independent of the operand width */
    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}
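
/* Worked example of the lazy-CF scheme above, under the assumption that
   the generated code left the usual values behind: after an 8-bit add of
   0xff + 0x01, CC_OP = CC_OP_ADDB, CC_SRC = 0x01 (second operand) and
   CC_DST = 0x100 (untruncated result).  The template-generated
   compute_c_addb() recovers the first operand as CC_DST - CC_SRC = 0xff
   and reports CF = ((uint8_t)CC_DST < (uint8_t)0xff) = (0x00 < 0xff) = 1,
   the expected carry out of 0xff + 0x01.  */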