/*
 * i386 helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"

//#define DEBUG_PCALL


#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif

#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

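/* precomputed PF values: parity_table[b] is CC_P when byte b has an
   even number of set bits, 0 otherwise */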
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

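/* e1/e2 are the two 32-bit halves of a descriptor: e1 holds limit 15:0
   and base 15:0, e2 holds base 23:16, the type/flag bits, limit 19:16
   and base 31:24 */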
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read and load the TSS segment it points to */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

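    /* bit 3 of the TSS descriptor type distinguishes a 32-bit TSS
       (104-byte base format) from a 16-bit TSS (44 bytes), hence the
       two minimum limits below */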
    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load only the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

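/* The I/O permission bitmap lives inside the 32-bit TSS, at the 16-bit
   offset stored at byte 0x66 of the TSS.  Each port maps to one bit; an
   access is allowed only if every bit covering the accessed ports is 0. */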
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
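    /* of the exceptions that can reach this path, only #DF(8), #TS(10),
       #NP(11), #SS(12), #GP(13), #PF(14) and #AC(17) push an error code */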
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege for software interrupts */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
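    /* build the interrupt stack frame: old SS:ESP only on a stack switch
       (preceded in vm86 mode by GS/FS/DS/ES), then EFLAGS, CS:EIP and
       finally the error code, in 32-bit or 16-bit slots depending on
       the gate size */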
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege for software interrupts */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
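    /* a non-zero IST field in the gate unconditionally selects one of
       the seven interrupt stacks from the 64-bit TSS */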
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
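    /* SYSCALL: the new CS selector comes from STAR[47:32], with SS
       implicitly that selector + 8; the target RIP comes from LSTAR
       (64-bit code), CSTAR (compat mode) or STAR[31:0] (legacy mode) */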
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
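    /* SYSRET: the base return selector comes from STAR[63:48]; a 64-bit
       return uses that selector + 16 for CS, and SS is always the base
       selector + 8 */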
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
#endif

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege for software interrupts */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        if (qemu_loglevel_mask(CPU_LOG_RESET))
            fprintf(logfile, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

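    /* vectors 0 (#DE) and 10-13 (#TS, #NP, #SS, #GP) form the
       "contributory" class: two contributory faults, or a page fault
       followed by a contributory fault or another page fault, escalate
       to a double fault */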
    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

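    /* the state save area sits at the top of the SMRAM segment; the
       fields below are at fixed offsets from smbase + 0x8000, in the
       layout advertised by SMM_REVISION_ID */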
#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */

/* division, flags are undefined */
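/* note: on x86 both a zero divisor and a quotient that does not fit in
   the destination raise the same #DE exception, hence EXCP00_DIVZ for
   the overflow cases below as well */
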
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: exception */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

1845void helper_into(int next_eip_addend)
1846{
1847 int eflags;
a7812ae4 1848 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1849 if (eflags & CC_O) {
1850 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1851 }
1852}
1853
1854void helper_cmpxchg8b(target_ulong a0)
1855{
1856 uint64_t d;
1857 int eflags;
1858
a7812ae4 1859 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1860 d = ldq(a0);
1861 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1862 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1863 eflags |= CC_Z;
1864 } else {
278ed7c3
FB
1865 /* always do the store */
1866 stq(a0, d);
eaa728ee
FB
1867 EDX = (uint32_t)(d >> 32);
1868 EAX = (uint32_t)d;
1869 eflags &= ~CC_Z;
1870 }
1871 CC_SRC = eflags;
1872}
1873
1874#ifdef TARGET_X86_64
1875void helper_cmpxchg16b(target_ulong a0)
1876{
1877 uint64_t d0, d1;
1878 int eflags;
1879
278ed7c3
FB
1880 if ((a0 & 0xf) != 0)
1881 raise_exception(EXCP0D_GPF);
a7812ae4 1882 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1883 d0 = ldq(a0);
1884 d1 = ldq(a0 + 8);
1885 if (d0 == EAX && d1 == EDX) {
1886 stq(a0, EBX);
1887 stq(a0 + 8, ECX);
1888 eflags |= CC_Z;
1889 } else {
278ed7c3
FB
1890 /* always do the store */
1891 stq(a0, d0);
1892 stq(a0 + 8, d1);
eaa728ee
FB
1893 EDX = d1;
1894 EAX = d0;
1895 eflags &= ~CC_Z;
1896 }
1897 CC_SRC = eflags;
1898}
1899#endif
1900
1901void helper_single_step(void)
1902{
1903#ifndef CONFIG_USER_ONLY
1904 check_hw_breakpoints(env, 1);
1905 env->dr[6] |= DR6_BS;
1906#endif
1907 raise_exception(EXCP01_DB);
1908}
1909
1910void helper_cpuid(void)
1911{
1912 uint32_t eax, ebx, ecx, edx;
1913
1914 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1915
1916 cpu_x86_cpuid(env, (uint32_t)EAX, &eax, &ebx, &ecx, &edx);
1917 EAX = eax;
1918 EBX = ebx;
1919 ECX = ecx;
1920 EDX = edx;
1921}
1922
1923void helper_enter_level(int level, int data32, target_ulong t1)
1924{
1925 target_ulong ssp;
1926 uint32_t esp_mask, esp, ebp;
1927
1928 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1929 ssp = env->segs[R_SS].base;
1930 ebp = EBP;
1931 esp = ESP;
1932 if (data32) {
1933 /* 32 bit */
1934 esp -= 4;
1935 while (--level) {
1936 esp -= 4;
1937 ebp -= 4;
1938 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1939 }
1940 esp -= 4;
1941 stl(ssp + (esp & esp_mask), t1);
1942 } else {
1943 /* 16 bit */
1944 esp -= 2;
1945 while (--level) {
1946 esp -= 2;
1947 ebp -= 2;
1948 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1949 }
1950 esp -= 2;
1951 stw(ssp + (esp & esp_mask), t1);
1952 }
1953}
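/*
 * Worked example for the 32-bit path above with level=3: esp moves
 * down 16 bytes in total.  The top slot is reserved but never
 * written by this helper (ENTER's own push of EBP, done outside this
 * function, fills it), the next two slots receive the saved frame
 * pointers copied down from the old frame at ebp, and t1 is stored
 * in the lowest slot.
 */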
1954
1955#ifdef TARGET_X86_64
1956void helper_enter64_level(int level, int data64, target_ulong t1)
1957{
1958 target_ulong esp, ebp;
1959 ebp = EBP;
1960 esp = ESP;
1961
1962 if (data64) {
1963 /* 64 bit */
1964 esp -= 8;
1965 while (--level) {
1966 esp -= 8;
1967 ebp -= 8;
1968 stq(esp, ldq(ebp));
1969 }
1970 esp -= 8;
1971 stq(esp, t1);
1972 } else {
1973 /* 16 bit */
1974 esp -= 2;
1975 while (--level) {
1976 esp -= 2;
1977 ebp -= 2;
1978 stw(esp, lduw(ebp));
1979 }
1980 esp -= 2;
1981 stw(esp, t1);
1982 }
1983}
1984#endif
1985
1986void helper_lldt(int selector)
1987{
1988 SegmentCache *dt;
1989 uint32_t e1, e2;
1990 int index, entry_limit;
1991 target_ulong ptr;
1992
1993 selector &= 0xffff;
1994 if ((selector & 0xfffc) == 0) {
1995 /* XXX: NULL selector case: invalid LDT */
1996 env->ldt.base = 0;
1997 env->ldt.limit = 0;
1998 } else {
1999 if (selector & 0x4)
2000 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2001 dt = &env->gdt;
2002 index = selector & ~7;
2003#ifdef TARGET_X86_64
2004 if (env->hflags & HF_LMA_MASK)
2005 entry_limit = 15;
2006 else
2007#endif
2008 entry_limit = 7;
2009 if ((index + entry_limit) > dt->limit)
2010 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2011 ptr = dt->base + index;
2012 e1 = ldl_kernel(ptr);
2013 e2 = ldl_kernel(ptr + 4);
2014 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2015 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2016 if (!(e2 & DESC_P_MASK))
2017 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2018#ifdef TARGET_X86_64
2019 if (env->hflags & HF_LMA_MASK) {
2020 uint32_t e3;
2021 e3 = ldl_kernel(ptr + 8);
2022 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2023 env->ldt.base |= (target_ulong)e3 << 32;
2024 } else
2025#endif
2026 {
2027 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2028 }
2029 }
2030 env->ldt.selector = selector;
2031}
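/*
 * Selector decoding used here and in the helpers below (reference
 * sketch): bits 15..3 index the descriptor table, bit 2 is TI
 * (0 = GDT, 1 = LDT) and bits 1..0 are the RPL.  E.g. 0x002B has
 * index 5, TI=1, RPL=3; "selector & ~7" (0x28) is then the byte
 * offset of the 8-byte descriptor within the table.
 */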
2032
2033void helper_ltr(int selector)
2034{
2035 SegmentCache *dt;
2036 uint32_t e1, e2;
2037 int index, type, entry_limit;
2038 target_ulong ptr;
2039
2040 selector &= 0xffff;
2041 if ((selector & 0xfffc) == 0) {
2042 /* NULL selector case: invalid TR */
2043 env->tr.base = 0;
2044 env->tr.limit = 0;
2045 env->tr.flags = 0;
2046 } else {
2047 if (selector & 0x4)
2048 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2049 dt = &env->gdt;
2050 index = selector & ~7;
2051#ifdef TARGET_X86_64
2052 if (env->hflags & HF_LMA_MASK)
2053 entry_limit = 15;
2054 else
2055#endif
2056 entry_limit = 7;
2057 if ((index + entry_limit) > dt->limit)
2058 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2059 ptr = dt->base + index;
2060 e1 = ldl_kernel(ptr);
2061 e2 = ldl_kernel(ptr + 4);
2062 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2063 if ((e2 & DESC_S_MASK) ||
2064 (type != 1 && type != 9))
2065 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2066 if (!(e2 & DESC_P_MASK))
2067 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2068#ifdef TARGET_X86_64
2069 if (env->hflags & HF_LMA_MASK) {
2070 uint32_t e3, e4;
2071 e3 = ldl_kernel(ptr + 8);
2072 e4 = ldl_kernel(ptr + 12);
2073 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2074 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2075 load_seg_cache_raw_dt(&env->tr, e1, e2);
2076 env->tr.base |= (target_ulong)e3 << 32;
2077 } else
2078#endif
2079 {
2080 load_seg_cache_raw_dt(&env->tr, e1, e2);
2081 }
2082 e2 |= DESC_TSS_BUSY_MASK;
2083 stl_kernel(ptr + 4, e2);
2084 }
2085 env->tr.selector = selector;
2086}
2087
2088/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2089void helper_load_seg(int seg_reg, int selector)
2090{
2091 uint32_t e1, e2;
2092 int cpl, dpl, rpl;
2093 SegmentCache *dt;
2094 int index;
2095 target_ulong ptr;
2096
2097 selector &= 0xffff;
2098 cpl = env->hflags & HF_CPL_MASK;
2099 if ((selector & 0xfffc) == 0) {
2100 /* null selector case */
2101 if (seg_reg == R_SS
2102#ifdef TARGET_X86_64
2103 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2104#endif
2105 )
2106 raise_exception_err(EXCP0D_GPF, 0);
2107 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2108 } else {
2109
2110 if (selector & 0x4)
2111 dt = &env->ldt;
2112 else
2113 dt = &env->gdt;
2114 index = selector & ~7;
2115 if ((index + 7) > dt->limit)
2116 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2117 ptr = dt->base + index;
2118 e1 = ldl_kernel(ptr);
2119 e2 = ldl_kernel(ptr + 4);
2120
2121 if (!(e2 & DESC_S_MASK))
2122 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2123 rpl = selector & 3;
2124 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2125 if (seg_reg == R_SS) {
2126 /* must be writable segment */
2127 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2128 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2129 if (rpl != cpl || dpl != cpl)
2130 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2131 } else {
2132 /* must be readable segment */
2133 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2134 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2135
2136 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2137 /* if not conforming code, test rights */
2138 if (dpl < cpl || dpl < rpl)
2139 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2140 }
2141 }
2142
2143 if (!(e2 & DESC_P_MASK)) {
2144 if (seg_reg == R_SS)
2145 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2146 else
2147 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2148 }
2149
2150 /* set the access bit if not already set */
2151 if (!(e2 & DESC_A_MASK)) {
2152 e2 |= DESC_A_MASK;
2153 stl_kernel(ptr + 4, e2);
2154 }
2155
2156 cpu_x86_load_seg_cache(env, seg_reg, selector,
2157 get_seg_base(e1, e2),
2158 get_seg_limit(e1, e2),
2159 e2);
2160#if 0
2161 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2162 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2163#endif
2164 }
2165}
2166
2167/* protected mode jump */
2168void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2169 int next_eip_addend)
2170{
2171 int gate_cs, type;
2172 uint32_t e1, e2, cpl, dpl, rpl, limit;
2173 target_ulong next_eip;
2174
2175 if ((new_cs & 0xfffc) == 0)
2176 raise_exception_err(EXCP0D_GPF, 0);
2177 if (load_segment(&e1, &e2, new_cs) != 0)
2178 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2179 cpl = env->hflags & HF_CPL_MASK;
2180 if (e2 & DESC_S_MASK) {
2181 if (!(e2 & DESC_CS_MASK))
2182 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2183 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2184 if (e2 & DESC_C_MASK) {
2185 /* conforming code segment */
2186 if (dpl > cpl)
2187 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2188 } else {
2189 /* non conforming code segment */
2190 rpl = new_cs & 3;
2191 if (rpl > cpl)
2192 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2193 if (dpl != cpl)
2194 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2195 }
2196 if (!(e2 & DESC_P_MASK))
2197 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2198 limit = get_seg_limit(e1, e2);
2199 if (new_eip > limit &&
2200 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2201 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2202 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2203 get_seg_base(e1, e2), limit, e2);
2204 EIP = new_eip;
2205 } else {
2206 /* jump to call or task gate */
2207 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2208 rpl = new_cs & 3;
2209 cpl = env->hflags & HF_CPL_MASK;
2210 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2211 switch(type) {
2212 case 1: /* 286 TSS */
2213 case 9: /* 386 TSS */
2214 case 5: /* task gate */
2215 if (dpl < cpl || dpl < rpl)
2216 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2217 next_eip = env->eip + next_eip_addend;
2218 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2219 CC_OP = CC_OP_EFLAGS;
2220 break;
2221 case 4: /* 286 call gate */
2222 case 12: /* 386 call gate */
2223 if ((dpl < cpl) || (dpl < rpl))
2224 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2225 if (!(e2 & DESC_P_MASK))
2226 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2227 gate_cs = e1 >> 16;
2228 new_eip = (e1 & 0xffff);
2229 if (type == 12)
2230 new_eip |= (e2 & 0xffff0000);
2231 if (load_segment(&e1, &e2, gate_cs) != 0)
2232 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2233 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2234 /* must be code segment */
2235 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2236 (DESC_S_MASK | DESC_CS_MASK)))
2237 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2238 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2239 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2240 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2241 if (!(e2 & DESC_P_MASK))
2242 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2243 limit = get_seg_limit(e1, e2);
2244 if (new_eip > limit)
2245 raise_exception_err(EXCP0D_GPF, 0);
2246 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2247 get_seg_base(e1, e2), limit, e2);
2248 EIP = new_eip;
2249 break;
2250 default:
2251 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2252 break;
2253 }
2254 }
2255}
2256
2257/* real mode call */
2258void helper_lcall_real(int new_cs, target_ulong new_eip1,
2259 int shift, int next_eip)
2260{
2261 int new_eip;
2262 uint32_t esp, esp_mask;
2263 target_ulong ssp;
2264
2265 new_eip = new_eip1;
2266 esp = ESP;
2267 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2268 ssp = env->segs[R_SS].base;
2269 if (shift) {
2270 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2271 PUSHL(ssp, esp, esp_mask, next_eip);
2272 } else {
2273 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2274 PUSHW(ssp, esp, esp_mask, next_eip);
2275 }
2276
2277 SET_ESP(esp, esp_mask);
2278 env->eip = new_eip;
2279 env->segs[R_CS].selector = new_cs;
2280 env->segs[R_CS].base = (new_cs << 4);
2281}
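/*
 * Example for the real-mode path above: "lcall 0x1234:0x5678" with
 * shift=1 pushes the old CS selector and next_eip as 32-bit values,
 * then sets CS.selector=0x1234, CS.base=0x12340 (selector << 4) and
 * EIP=0x5678; no descriptor checks apply in real mode.
 */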
2282
2283/* protected mode call */
2284void helper_lcall_protected(int new_cs, target_ulong new_eip,
2285 int shift, int next_eip_addend)
2286{
2287 int new_stack, i;
2288 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2289 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2290 uint32_t val, limit, old_sp_mask;
2291 target_ulong ssp, old_ssp, next_eip;
2292
2293 next_eip = env->eip + next_eip_addend;
2294 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2295 LOG_PCALL_STATE(env);
2296 if ((new_cs & 0xfffc) == 0)
2297 raise_exception_err(EXCP0D_GPF, 0);
2298 if (load_segment(&e1, &e2, new_cs) != 0)
2299 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2300 cpl = env->hflags & HF_CPL_MASK;
2301 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2302 if (e2 & DESC_S_MASK) {
2303 if (!(e2 & DESC_CS_MASK))
2304 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2305 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2306 if (e2 & DESC_C_MASK) {
2307 /* conforming code segment */
2308 if (dpl > cpl)
2309 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2310 } else {
2311 /* non conforming code segment */
2312 rpl = new_cs & 3;
2313 if (rpl > cpl)
2314 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2315 if (dpl != cpl)
2316 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2317 }
2318 if (!(e2 & DESC_P_MASK))
2319 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2320
2321#ifdef TARGET_X86_64
2322 /* XXX: check 16/32 bit cases in long mode */
2323 if (shift == 2) {
2324 target_ulong rsp;
2325 /* 64 bit case */
2326 rsp = ESP;
2327 PUSHQ(rsp, env->segs[R_CS].selector);
2328 PUSHQ(rsp, next_eip);
2329 /* from this point, not restartable */
2330 ESP = rsp;
2331 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2332 get_seg_base(e1, e2),
2333 get_seg_limit(e1, e2), e2);
2334 EIP = new_eip;
2335 } else
2336#endif
2337 {
2338 sp = ESP;
2339 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2340 ssp = env->segs[R_SS].base;
2341 if (shift) {
2342 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2343 PUSHL(ssp, sp, sp_mask, next_eip);
2344 } else {
2345 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2346 PUSHW(ssp, sp, sp_mask, next_eip);
2347 }
2348
2349 limit = get_seg_limit(e1, e2);
2350 if (new_eip > limit)
2351 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2352 /* from this point, not restartable */
2353 SET_ESP(sp, sp_mask);
2354 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2355 get_seg_base(e1, e2), limit, e2);
2356 EIP = new_eip;
2357 }
2358 } else {
2359 /* check gate type */
2360 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2361 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2362 rpl = new_cs & 3;
2363 switch(type) {
2364 case 1: /* available 286 TSS */
2365 case 9: /* available 386 TSS */
2366 case 5: /* task gate */
2367 if (dpl < cpl || dpl < rpl)
2368 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2369 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2370 CC_OP = CC_OP_EFLAGS;
2371 return;
2372 case 4: /* 286 call gate */
2373 case 12: /* 386 call gate */
2374 break;
2375 default:
2376 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2377 break;
2378 }
2379 shift = type >> 3;
2380
2381 if (dpl < cpl || dpl < rpl)
2382 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2383 /* check valid bit */
2384 if (!(e2 & DESC_P_MASK))
2385 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2386 selector = e1 >> 16;
2387 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2388 param_count = e2 & 0x1f;
2389 if ((selector & 0xfffc) == 0)
2390 raise_exception_err(EXCP0D_GPF, 0);
2391
2392 if (load_segment(&e1, &e2, selector) != 0)
2393 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2394 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2395 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2396 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2397 if (dpl > cpl)
2398 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2399 if (!(e2 & DESC_P_MASK))
2400 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2401
2402 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2403 /* to inner privilege */
2404 get_ss_esp_from_tss(&ss, &sp, dpl);
2405 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2406 ss, sp, param_count, ESP);
2407 if ((ss & 0xfffc) == 0)
2408 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2409 if ((ss & 3) != dpl)
2410 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2411 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2412 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2413 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2414 if (ss_dpl != dpl)
2415 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2416 if (!(ss_e2 & DESC_S_MASK) ||
2417 (ss_e2 & DESC_CS_MASK) ||
2418 !(ss_e2 & DESC_W_MASK))
2419 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2420 if (!(ss_e2 & DESC_P_MASK))
2421 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2422
2423 // push_size = ((param_count * 2) + 8) << shift;
2424
2425 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2426 old_ssp = env->segs[R_SS].base;
2427
2428 sp_mask = get_sp_mask(ss_e2);
2429 ssp = get_seg_base(ss_e1, ss_e2);
2430 if (shift) {
2431 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2432 PUSHL(ssp, sp, sp_mask, ESP);
2433 for(i = param_count - 1; i >= 0; i--) {
2434 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2435 PUSHL(ssp, sp, sp_mask, val);
2436 }
2437 } else {
2438 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2439 PUSHW(ssp, sp, sp_mask, ESP);
2440 for(i = param_count - 1; i >= 0; i--) {
2441 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2442 PUSHW(ssp, sp, sp_mask, val);
2443 }
2444 }
2445 new_stack = 1;
2446 } else {
2447 /* to same privilege */
2448 sp = ESP;
2449 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2450 ssp = env->segs[R_SS].base;
2451 // push_size = (4 << shift);
2452 new_stack = 0;
2453 }
2454
2455 if (shift) {
2456 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2457 PUSHL(ssp, sp, sp_mask, next_eip);
2458 } else {
2459 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2460 PUSHW(ssp, sp, sp_mask, next_eip);
2461 }
2462
2463 /* from this point, not restartable */
2464
2465 if (new_stack) {
2466 ss = (ss & ~3) | dpl;
2467 cpu_x86_load_seg_cache(env, R_SS, ss,
2468 ssp,
2469 get_seg_limit(ss_e1, ss_e2),
2470 ss_e2);
2471 }
2472
2473 selector = (selector & ~3) | dpl;
2474 cpu_x86_load_seg_cache(env, R_CS, selector,
2475 get_seg_base(e1, e2),
2476 get_seg_limit(e1, e2),
2477 e2);
2478 cpu_x86_set_cpl(env, dpl);
2479 SET_ESP(sp, sp_mask);
2480 EIP = offset;
2481 }
2482#ifdef USE_KQEMU
2483 if (kqemu_is_ok(env)) {
2484 env->exception_index = -1;
2485 cpu_loop_exit();
2486 }
2487#endif
2488}
2489
2490/* real and vm86 mode iret */
2491void helper_iret_real(int shift)
2492{
2493 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2494 target_ulong ssp;
2495 int eflags_mask;
2496
2497 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2498 sp = ESP;
2499 ssp = env->segs[R_SS].base;
2500 if (shift == 1) {
2501 /* 32 bits */
2502 POPL(ssp, sp, sp_mask, new_eip);
2503 POPL(ssp, sp, sp_mask, new_cs);
2504 new_cs &= 0xffff;
2505 POPL(ssp, sp, sp_mask, new_eflags);
2506 } else {
2507 /* 16 bits */
2508 POPW(ssp, sp, sp_mask, new_eip);
2509 POPW(ssp, sp, sp_mask, new_cs);
2510 POPW(ssp, sp, sp_mask, new_eflags);
2511 }
2512 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2513 env->segs[R_CS].selector = new_cs;
2514 env->segs[R_CS].base = (new_cs << 4);
2515 env->eip = new_eip;
2516 if (env->eflags & VM_MASK)
2517 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2518 else
2519 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2520 if (shift == 0)
2521 eflags_mask &= 0xffff;
2522 load_eflags(new_eflags, eflags_mask);
2523 env->hflags2 &= ~HF2_NMI_MASK;
2524}
2525
2526static inline void validate_seg(int seg_reg, int cpl)
2527{
2528 int dpl;
2529 uint32_t e2;
2530
2531 /* XXX: on x86_64, we do not want to nullify FS and GS because
2532 they may still contain a valid base. I would be interested to
2533 know how a real x86_64 CPU behaves */
2534 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2535 (env->segs[seg_reg].selector & 0xfffc) == 0)
2536 return;
2537
2538 e2 = env->segs[seg_reg].flags;
2539 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2540 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2541 /* data or non conforming code segment */
2542 if (dpl < cpl) {
2543 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2544 }
2545 }
2546}
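/*
 * Example: an outer return from CPL 0 to CPL 3 (cpl argument = new
 * RPL = 3) with DS still caching a DPL-0 data segment hits the
 * "dpl < cpl" test above, so DS is nullified; a null FS/GS selector
 * is skipped entirely, so its hidden base survives (see the XXX).
 */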
2547
2548/* protected mode iret */
2549static inline void helper_ret_protected(int shift, int is_iret, int addend)
2550{
2551 uint32_t new_cs, new_eflags, new_ss;
2552 uint32_t new_es, new_ds, new_fs, new_gs;
2553 uint32_t e1, e2, ss_e1, ss_e2;
2554 int cpl, dpl, rpl, eflags_mask, iopl;
2555 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2556
2557#ifdef TARGET_X86_64
2558 if (shift == 2)
2559 sp_mask = -1;
2560 else
2561#endif
2562 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2563 sp = ESP;
2564 ssp = env->segs[R_SS].base;
2565 new_eflags = 0; /* avoid warning */
2566#ifdef TARGET_X86_64
2567 if (shift == 2) {
2568 POPQ(sp, new_eip);
2569 POPQ(sp, new_cs);
2570 new_cs &= 0xffff;
2571 if (is_iret) {
2572 POPQ(sp, new_eflags);
2573 }
2574 } else
2575#endif
2576 if (shift == 1) {
2577 /* 32 bits */
2578 POPL(ssp, sp, sp_mask, new_eip);
2579 POPL(ssp, sp, sp_mask, new_cs);
2580 new_cs &= 0xffff;
2581 if (is_iret) {
2582 POPL(ssp, sp, sp_mask, new_eflags);
2583 if (new_eflags & VM_MASK)
2584 goto return_to_vm86;
2585 }
2586 } else {
2587 /* 16 bits */
2588 POPW(ssp, sp, sp_mask, new_eip);
2589 POPW(ssp, sp, sp_mask, new_cs);
2590 if (is_iret)
2591 POPW(ssp, sp, sp_mask, new_eflags);
2592 }
2593 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2594 new_cs, new_eip, shift, addend);
2595 LOG_PCALL_STATE(env);
2596 if ((new_cs & 0xfffc) == 0)
2597 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2598 if (load_segment(&e1, &e2, new_cs) != 0)
2599 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2600 if (!(e2 & DESC_S_MASK) ||
2601 !(e2 & DESC_CS_MASK))
2602 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2603 cpl = env->hflags & HF_CPL_MASK;
2604 rpl = new_cs & 3;
2605 if (rpl < cpl)
2606 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2607 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2608 if (e2 & DESC_C_MASK) {
2609 if (dpl > rpl)
2610 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2611 } else {
2612 if (dpl != rpl)
2613 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2614 }
2615 if (!(e2 & DESC_P_MASK))
2616 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2617
2618 sp += addend;
2619 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2620 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2621 /* return to same privilege level */
2622 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2623 get_seg_base(e1, e2),
2624 get_seg_limit(e1, e2),
2625 e2);
2626 } else {
2627 /* return to different privilege level */
2628#ifdef TARGET_X86_64
2629 if (shift == 2) {
2630 POPQ(sp, new_esp);
2631 POPQ(sp, new_ss);
2632 new_ss &= 0xffff;
2633 } else
2634#endif
2635 if (shift == 1) {
2636 /* 32 bits */
2637 POPL(ssp, sp, sp_mask, new_esp);
2638 POPL(ssp, sp, sp_mask, new_ss);
2639 new_ss &= 0xffff;
2640 } else {
2641 /* 16 bits */
2642 POPW(ssp, sp, sp_mask, new_esp);
2643 POPW(ssp, sp, sp_mask, new_ss);
2644 }
2645 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2646 new_ss, new_esp);
2647 if ((new_ss & 0xfffc) == 0) {
2648#ifdef TARGET_X86_64
2649 /* NULL ss is allowed in long mode if cpl != 3 */
2650 /* XXX: test CS64 ? */
2651 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2652 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2653 0, 0xffffffff,
2654 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2655 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2656 DESC_W_MASK | DESC_A_MASK);
2657 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2658 } else
2659#endif
2660 {
2661 raise_exception_err(EXCP0D_GPF, 0);
2662 }
2663 } else {
2664 if ((new_ss & 3) != rpl)
2665 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2666 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2667 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2668 if (!(ss_e2 & DESC_S_MASK) ||
2669 (ss_e2 & DESC_CS_MASK) ||
2670 !(ss_e2 & DESC_W_MASK))
2671 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2672 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2673 if (dpl != rpl)
2674 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2675 if (!(ss_e2 & DESC_P_MASK))
2676 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2677 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2678 get_seg_base(ss_e1, ss_e2),
2679 get_seg_limit(ss_e1, ss_e2),
2680 ss_e2);
2681 }
2682
2683 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2684 get_seg_base(e1, e2),
2685 get_seg_limit(e1, e2),
2686 e2);
2687 cpu_x86_set_cpl(env, rpl);
2688 sp = new_esp;
2689#ifdef TARGET_X86_64
2690 if (env->hflags & HF_CS64_MASK)
2691 sp_mask = -1;
2692 else
2693#endif
2694 sp_mask = get_sp_mask(ss_e2);
2695
2696 /* validate data segments */
2697 validate_seg(R_ES, rpl);
2698 validate_seg(R_DS, rpl);
2699 validate_seg(R_FS, rpl);
2700 validate_seg(R_GS, rpl);
2701
2702 sp += addend;
2703 }
2704 SET_ESP(sp, sp_mask);
2705 env->eip = new_eip;
2706 if (is_iret) {
2707 /* NOTE: 'cpl' is the _old_ CPL */
2708 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2709 if (cpl == 0)
2710 eflags_mask |= IOPL_MASK;
2711 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2712 if (cpl <= iopl)
2713 eflags_mask |= IF_MASK;
2714 if (shift == 0)
2715 eflags_mask &= 0xffff;
2716 load_eflags(new_eflags, eflags_mask);
2717 }
2718 return;
2719
2720 return_to_vm86:
2721 POPL(ssp, sp, sp_mask, new_esp);
2722 POPL(ssp, sp, sp_mask, new_ss);
2723 POPL(ssp, sp, sp_mask, new_es);
2724 POPL(ssp, sp, sp_mask, new_ds);
2725 POPL(ssp, sp, sp_mask, new_fs);
2726 POPL(ssp, sp, sp_mask, new_gs);
2727
2728 /* modify processor state */
2729 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2730 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2731 load_seg_vm(R_CS, new_cs & 0xffff);
2732 cpu_x86_set_cpl(env, 3);
2733 load_seg_vm(R_SS, new_ss & 0xffff);
2734 load_seg_vm(R_ES, new_es & 0xffff);
2735 load_seg_vm(R_DS, new_ds & 0xffff);
2736 load_seg_vm(R_FS, new_fs & 0xffff);
2737 load_seg_vm(R_GS, new_gs & 0xffff);
2738
2739 env->eip = new_eip & 0xffff;
2740 ESP = new_esp;
2741}
2742
2743void helper_iret_protected(int shift, int next_eip)
2744{
2745 int tss_selector, type;
2746 uint32_t e1, e2;
2747
2748 /* specific case for TSS */
2749 if (env->eflags & NT_MASK) {
2750#ifdef TARGET_X86_64
2751 if (env->hflags & HF_LMA_MASK)
2752 raise_exception_err(EXCP0D_GPF, 0);
2753#endif
2754 tss_selector = lduw_kernel(env->tr.base + 0);
2755 if (tss_selector & 4)
2756 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2757 if (load_segment(&e1, &e2, tss_selector) != 0)
2758 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2759 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2760 /* NOTE: we check both segment and busy TSS */
2761 if (type != 3)
2762 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2763 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2764 } else {
2765 helper_ret_protected(shift, 1, 0);
2766 }
2767 env->hflags2 &= ~HF2_NMI_MASK;
2768#ifdef USE_KQEMU
2769 if (kqemu_is_ok(env)) {
2770 CC_OP = CC_OP_EFLAGS;
2771 env->exception_index = -1;
2772 cpu_loop_exit();
2773 }
2774#endif
2775}
2776
2777void helper_lret_protected(int shift, int addend)
2778{
2779 helper_ret_protected(shift, 0, addend);
2780#ifdef USE_KQEMU
2781 if (kqemu_is_ok(env)) {
2782 env->exception_index = -1;
2783 cpu_loop_exit();
2784 }
2785#endif
2786}
2787
2788void helper_sysenter(void)
2789{
2790 if (env->sysenter_cs == 0) {
2791 raise_exception_err(EXCP0D_GPF, 0);
2792 }
2793 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2794 cpu_x86_set_cpl(env, 0);
2795
2796#ifdef TARGET_X86_64
2797 if (env->hflags & HF_LMA_MASK) {
2798 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2799 0, 0xffffffff,
2800 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2801 DESC_S_MASK |
2802 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2803 } else
2804#endif
2805 {
2806 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2807 0, 0xffffffff,
2808 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2809 DESC_S_MASK |
2810 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2811 }
2812 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2813 0, 0xffffffff,
2814 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2815 DESC_S_MASK |
2816 DESC_W_MASK | DESC_A_MASK);
2817 ESP = env->sysenter_esp;
2818 EIP = env->sysenter_eip;
2819}
2820
2821void helper_sysexit(int dflag)
2822{
2823 int cpl;
2824
2825 cpl = env->hflags & HF_CPL_MASK;
2826 if (env->sysenter_cs == 0 || cpl != 0) {
2827 raise_exception_err(EXCP0D_GPF, 0);
2828 }
2829 cpu_x86_set_cpl(env, 3);
2830#ifdef TARGET_X86_64
2831 if (dflag == 2) {
2832 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2833 0, 0xffffffff,
2834 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2835 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2836 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2837 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2838 0, 0xffffffff,
2839 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2840 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2841 DESC_W_MASK | DESC_A_MASK);
2842 } else
2843#endif
2844 {
2845 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2846 0, 0xffffffff,
2847 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2848 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2849 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2850 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2851 0, 0xffffffff,
2852 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2853 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2854 DESC_W_MASK | DESC_A_MASK);
2855 }
2856 ESP = ECX;
2857 EIP = EDX;
2858#ifdef USE_KQEMU
2859 if (kqemu_is_ok(env)) {
2860 env->exception_index = -1;
2861 cpu_loop_exit();
2862 }
2863#endif
2864}
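/*
 * Selector layout assumed by the SYSENTER/SYSEXIT code above,
 * relative to MSR_IA32_SYSENTER_CS: +0 kernel CS, +8 kernel SS,
 * +16 user CS, +24 user SS for the legacy path, and +32/+40 for the
 * 64-bit sysexit path; all are flat 0..4G segments.
 */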
2865
2866#if defined(CONFIG_USER_ONLY)
2867target_ulong helper_read_crN(int reg)
2868{
2869 return 0;
2870}
2871
2872void helper_write_crN(int reg, target_ulong t0)
2873{
2874}
2875
2876void helper_movl_drN_T0(int reg, target_ulong t0)
2877{
2878}
2879#else
2880target_ulong helper_read_crN(int reg)
2881{
2882 target_ulong val;
2883
2884 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2885 switch(reg) {
2886 default:
2887 val = env->cr[reg];
2888 break;
2889 case 8:
2890 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2891 val = cpu_get_apic_tpr(env);
2892 } else {
2893 val = env->v_tpr;
2894 }
2895 break;
2896 }
2897 return val;
2898}
2899
2900void helper_write_crN(int reg, target_ulong t0)
2901{
2902 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2903 switch(reg) {
2904 case 0:
2905 cpu_x86_update_cr0(env, t0);
2906 break;
2907 case 3:
2908 cpu_x86_update_cr3(env, t0);
2909 break;
2910 case 4:
2911 cpu_x86_update_cr4(env, t0);
2912 break;
2913 case 8:
2914 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2915 cpu_set_apic_tpr(env, t0);
2916 }
2917 env->v_tpr = t0 & 0x0f;
2918 break;
2919 default:
2920 env->cr[reg] = t0;
2921 break;
2922 }
2923}
2924
2925void helper_movl_drN_T0(int reg, target_ulong t0)
2926{
2927 int i;
2928
2929 if (reg < 4) {
2930 hw_breakpoint_remove(env, reg);
2931 env->dr[reg] = t0;
2932 hw_breakpoint_insert(env, reg);
2933 } else if (reg == 7) {
2934 for (i = 0; i < 4; i++)
2935 hw_breakpoint_remove(env, i);
2936 env->dr[7] = t0;
2937 for (i = 0; i < 4; i++)
2938 hw_breakpoint_insert(env, i);
2939 } else
2940 env->dr[reg] = t0;
2941}
2942#endif
2943
2944void helper_lmsw(target_ulong t0)
2945{
2946 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2947 if already set to one. */
2948 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2949 helper_write_crN(0, t0);
2950}
2951
2952void helper_clts(void)
2953{
2954 env->cr[0] &= ~CR0_TS_MASK;
2955 env->hflags &= ~HF_TS_MASK;
2956}
2957
2958void helper_invlpg(target_ulong addr)
2959{
2960 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
2961 tlb_flush_page(env, addr);
2962}
2963
2964void helper_rdtsc(void)
2965{
2966 uint64_t val;
2967
2968 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2969 raise_exception(EXCP0D_GPF);
2970 }
2971 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
2972
2973 val = cpu_get_tsc(env) + env->tsc_offset;
2974 EAX = (uint32_t)(val);
2975 EDX = (uint32_t)(val >> 32);
2976}
2977
2978void helper_rdpmc(void)
2979{
2980 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2981 raise_exception(EXCP0D_GPF);
2982 }
2983 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
2984
2985 /* currently unimplemented */
2986 raise_exception_err(EXCP06_ILLOP, 0);
2987}
2988
2989#if defined(CONFIG_USER_ONLY)
2990void helper_wrmsr(void)
2991{
2992}
2993
2994void helper_rdmsr(void)
2995{
2996}
2997#else
2998void helper_wrmsr(void)
2999{
3000 uint64_t val;
3001
3002 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3003
3004 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3005
3006 switch((uint32_t)ECX) {
3007 case MSR_IA32_SYSENTER_CS:
3008 env->sysenter_cs = val & 0xffff;
3009 break;
3010 case MSR_IA32_SYSENTER_ESP:
3011 env->sysenter_esp = val;
3012 break;
3013 case MSR_IA32_SYSENTER_EIP:
3014 env->sysenter_eip = val;
3015 break;
3016 case MSR_IA32_APICBASE:
3017 cpu_set_apic_base(env, val);
3018 break;
3019 case MSR_EFER:
3020 {
3021 uint64_t update_mask;
3022 update_mask = 0;
3023 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3024 update_mask |= MSR_EFER_SCE;
3025 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3026 update_mask |= MSR_EFER_LME;
3027 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3028 update_mask |= MSR_EFER_FFXSR;
3029 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3030 update_mask |= MSR_EFER_NXE;
3031 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3032 update_mask |= MSR_EFER_SVME;
3033 cpu_load_efer(env, (env->efer & ~update_mask) |
3034 (val & update_mask));
3035 }
3036 break;
3037 case MSR_STAR:
3038 env->star = val;
3039 break;
3040 case MSR_PAT:
3041 env->pat = val;
3042 break;
3043 case MSR_VM_HSAVE_PA:
3044 env->vm_hsave = val;
3045 break;
3046#ifdef TARGET_X86_64
3047 case MSR_LSTAR:
3048 env->lstar = val;
3049 break;
3050 case MSR_CSTAR:
3051 env->cstar = val;
3052 break;
3053 case MSR_FMASK:
3054 env->fmask = val;
3055 break;
3056 case MSR_FSBASE:
3057 env->segs[R_FS].base = val;
3058 break;
3059 case MSR_GSBASE:
3060 env->segs[R_GS].base = val;
3061 break;
3062 case MSR_KERNELGSBASE:
3063 env->kernelgsbase = val;
3064 break;
3065#endif
3066 case MSR_MTRRphysBase(0):
3067 case MSR_MTRRphysBase(1):
3068 case MSR_MTRRphysBase(2):
3069 case MSR_MTRRphysBase(3):
3070 case MSR_MTRRphysBase(4):
3071 case MSR_MTRRphysBase(5):
3072 case MSR_MTRRphysBase(6):
3073 case MSR_MTRRphysBase(7):
3074 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3075 break;
3076 case MSR_MTRRphysMask(0):
3077 case MSR_MTRRphysMask(1):
3078 case MSR_MTRRphysMask(2):
3079 case MSR_MTRRphysMask(3):
3080 case MSR_MTRRphysMask(4):
3081 case MSR_MTRRphysMask(5):
3082 case MSR_MTRRphysMask(6):
3083 case MSR_MTRRphysMask(7):
3084 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3085 break;
3086 case MSR_MTRRfix64K_00000:
3087 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3088 break;
3089 case MSR_MTRRfix16K_80000:
3090 case MSR_MTRRfix16K_A0000:
3091 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3092 break;
3093 case MSR_MTRRfix4K_C0000:
3094 case MSR_MTRRfix4K_C8000:
3095 case MSR_MTRRfix4K_D0000:
3096 case MSR_MTRRfix4K_D8000:
3097 case MSR_MTRRfix4K_E0000:
3098 case MSR_MTRRfix4K_E8000:
3099 case MSR_MTRRfix4K_F0000:
3100 case MSR_MTRRfix4K_F8000:
3101 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3102 break;
3103 case MSR_MTRRdefType:
3104 env->mtrr_deftype = val;
3105 break;
3106 default:
3107 /* XXX: exception ? */
3108 break;
3109 }
3110}
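/*
 * Example of the MSR_EFER masking above: on a CPU model advertising
 * CPUID_EXT2_SYSCALL and CPUID_EXT2_NX but neither LM nor SVM,
 * update_mask = SCE|NXE, so a guest write of SCE|LME|NXE|SVME
 * changes only SCE and NXE while LME and SVME keep their previous
 * values; the ignored bits raise no exception.
 */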
3111
3112void helper_rdmsr(void)
3113{
3114 uint64_t val;
3115
3116 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3117
3118 switch((uint32_t)ECX) {
3119 case MSR_IA32_SYSENTER_CS:
3120 val = env->sysenter_cs;
3121 break;
3122 case MSR_IA32_SYSENTER_ESP:
3123 val = env->sysenter_esp;
3124 break;
3125 case MSR_IA32_SYSENTER_EIP:
3126 val = env->sysenter_eip;
3127 break;
3128 case MSR_IA32_APICBASE:
3129 val = cpu_get_apic_base(env);
3130 break;
3131 case MSR_EFER:
3132 val = env->efer;
3133 break;
3134 case MSR_STAR:
3135 val = env->star;
3136 break;
3137 case MSR_PAT:
3138 val = env->pat;
3139 break;
3140 case MSR_VM_HSAVE_PA:
3141 val = env->vm_hsave;
3142 break;
3143 case MSR_IA32_PERF_STATUS:
3144 /* tsc_increment_by_tick */
3145 val = 1000ULL;
3146 /* CPU multiplier */
3147 val |= (((uint64_t)4ULL) << 40);
3148 break;
3149#ifdef TARGET_X86_64
3150 case MSR_LSTAR:
3151 val = env->lstar;
3152 break;
3153 case MSR_CSTAR:
3154 val = env->cstar;
3155 break;
3156 case MSR_FMASK:
3157 val = env->fmask;
3158 break;
3159 case MSR_FSBASE:
3160 val = env->segs[R_FS].base;
3161 break;
3162 case MSR_GSBASE:
3163 val = env->segs[R_GS].base;
3164 break;
3165 case MSR_KERNELGSBASE:
3166 val = env->kernelgsbase;
3167 break;
3168#endif
3169#ifdef USE_KQEMU
3170 case MSR_QPI_COMMBASE:
3171 if (env->kqemu_enabled) {
3172 val = kqemu_comm_base;
3173 } else {
3174 val = 0;
3175 }
3176 break;
3177#endif
3178 case MSR_MTRRphysBase(0):
3179 case MSR_MTRRphysBase(1):
3180 case MSR_MTRRphysBase(2):
3181 case MSR_MTRRphysBase(3):
3182 case MSR_MTRRphysBase(4):
3183 case MSR_MTRRphysBase(5):
3184 case MSR_MTRRphysBase(6):
3185 case MSR_MTRRphysBase(7):
3186 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3187 break;
3188 case MSR_MTRRphysMask(0):
3189 case MSR_MTRRphysMask(1):
3190 case MSR_MTRRphysMask(2):
3191 case MSR_MTRRphysMask(3):
3192 case MSR_MTRRphysMask(4):
3193 case MSR_MTRRphysMask(5):
3194 case MSR_MTRRphysMask(6):
3195 case MSR_MTRRphysMask(7):
3196 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3197 break;
3198 case MSR_MTRRfix64K_00000:
3199 val = env->mtrr_fixed[0];
3200 break;
3201 case MSR_MTRRfix16K_80000:
3202 case MSR_MTRRfix16K_A0000:
3203 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3204 break;
3205 case MSR_MTRRfix4K_C0000:
3206 case MSR_MTRRfix4K_C8000:
3207 case MSR_MTRRfix4K_D0000:
3208 case MSR_MTRRfix4K_D8000:
3209 case MSR_MTRRfix4K_E0000:
3210 case MSR_MTRRfix4K_E8000:
3211 case MSR_MTRRfix4K_F0000:
3212 case MSR_MTRRfix4K_F8000:
3213 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3214 break;
3215 case MSR_MTRRdefType:
3216 val = env->mtrr_deftype;
3217 break;
3218 case MSR_MTRRcap:
3219 if (env->cpuid_features & CPUID_MTRR)
3220 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3221 else
3222 /* XXX: exception ? */
3223 val = 0;
3224 break;
3225 default:
3226 /* XXX: exception ? */
3227 val = 0;
3228 break;
3229 }
3230 EAX = (uint32_t)(val);
3231 EDX = (uint32_t)(val >> 32);
3232}
3233#endif
3234
3235target_ulong helper_lsl(target_ulong selector1)
3236{
3237 unsigned int limit;
3238 uint32_t e1, e2, eflags, selector;
3239 int rpl, dpl, cpl, type;
3240
3241 selector = selector1 & 0xffff;
3242 eflags = helper_cc_compute_all(CC_OP);
3243 if (load_segment(&e1, &e2, selector) != 0)
3244 goto fail;
3245 rpl = selector & 3;
3246 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3247 cpl = env->hflags & HF_CPL_MASK;
3248 if (e2 & DESC_S_MASK) {
3249 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3250 /* conforming */
3251 } else {
3252 if (dpl < cpl || dpl < rpl)
3253 goto fail;
3254 }
3255 } else {
3256 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3257 switch(type) {
3258 case 1:
3259 case 2:
3260 case 3:
3261 case 9:
3262 case 11:
3263 break;
3264 default:
3265 goto fail;
3266 }
3267 if (dpl < cpl || dpl < rpl) {
3268 fail:
3269 CC_SRC = eflags & ~CC_Z;
3270 return 0;
3271 }
3272 }
3273 limit = get_seg_limit(e1, e2);
3274 CC_SRC = eflags | CC_Z;
3275 return limit;
3276}
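/*
 * LSL as implemented above reports success purely through ZF: on
 * success the segment limit is returned with CC_Z set, on any
 * failure 0 is returned with CC_Z cleared.  The caller must test ZF
 * rather than the value, since 0 is also a legal limit.
 */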
3277
3278target_ulong helper_lar(target_ulong selector1)
3279{
3280 uint32_t e1, e2, eflags, selector;
3281 int rpl, dpl, cpl, type;
3282
3283 selector = selector1 & 0xffff;
3284 eflags = helper_cc_compute_all(CC_OP);
3285 if ((selector & 0xfffc) == 0)
3286 goto fail;
3287 if (load_segment(&e1, &e2, selector) != 0)
3288 goto fail;
3289 rpl = selector & 3;
3290 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3291 cpl = env->hflags & HF_CPL_MASK;
3292 if (e2 & DESC_S_MASK) {
3293 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3294 /* conforming */
3295 } else {
3296 if (dpl < cpl || dpl < rpl)
3297 goto fail;
3298 }
3299 } else {
3300 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3301 switch(type) {
3302 case 1:
3303 case 2:
3304 case 3:
3305 case 4:
3306 case 5:
3307 case 9:
3308 case 11:
3309 case 12:
3310 break;
3311 default:
3312 goto fail;
3313 }
3314 if (dpl < cpl || dpl < rpl) {
3315 fail:
3316 CC_SRC = eflags & ~CC_Z;
3317 return 0;
3318 }
3319 }
3320 CC_SRC = eflags | CC_Z;
3321 return e2 & 0x00f0ff00;
3322}
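/* The 0x00f0ff00 mask above keeps exactly the access-rights bits LAR
   publishes from the high descriptor word: type/S/DPL/P in bits
   8..15 and AVL/L/D/G in bits 20..23. */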
3323
3324void helper_verr(target_ulong selector1)
3325{
3326 uint32_t e1, e2, eflags, selector;
3327 int rpl, dpl, cpl;
3328
3329 selector = selector1 & 0xffff;
3330 eflags = helper_cc_compute_all(CC_OP);
3331 if ((selector & 0xfffc) == 0)
3332 goto fail;
3333 if (load_segment(&e1, &e2, selector) != 0)
3334 goto fail;
3335 if (!(e2 & DESC_S_MASK))
3336 goto fail;
3337 rpl = selector & 3;
3338 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3339 cpl = env->hflags & HF_CPL_MASK;
3340 if (e2 & DESC_CS_MASK) {
3341 if (!(e2 & DESC_R_MASK))
3342 goto fail;
3343 if (!(e2 & DESC_C_MASK)) {
3344 if (dpl < cpl || dpl < rpl)
3345 goto fail;
3346 }
3347 } else {
3348 if (dpl < cpl || dpl < rpl) {
3349 fail:
3350 CC_SRC = eflags & ~CC_Z;
3351 return;
3352 }
3353 }
3354 CC_SRC = eflags | CC_Z;
3355}
3356
3357void helper_verw(target_ulong selector1)
3358{
3359 uint32_t e1, e2, eflags, selector;
3360 int rpl, dpl, cpl;
3361
3362 selector = selector1 & 0xffff;
3363 eflags = helper_cc_compute_all(CC_OP);
3364 if ((selector & 0xfffc) == 0)
3365 goto fail;
3366 if (load_segment(&e1, &e2, selector) != 0)
3367 goto fail;
3368 if (!(e2 & DESC_S_MASK))
3369 goto fail;
3370 rpl = selector & 3;
3371 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3372 cpl = env->hflags & HF_CPL_MASK;
3373 if (e2 & DESC_CS_MASK) {
3374 goto fail;
3375 } else {
3376 if (dpl < cpl || dpl < rpl)
3377 goto fail;
3378 if (!(e2 & DESC_W_MASK)) {
3379 fail:
3380 CC_SRC = eflags & ~CC_Z;
3381 return;
3382 }
3383 }
3384 CC_SRC = eflags | CC_Z;
3385}
3386
3387/* x87 FPU helpers */
3388
3389static void fpu_set_exception(int mask)
3390{
3391 env->fpus |= mask;
3392 if (env->fpus & (~env->fpuc & FPUC_EM))
3393 env->fpus |= FPUS_SE | FPUS_B;
3394}
3395
3396static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3397{
3398 if (b == 0.0)
3399 fpu_set_exception(FPUS_ZE);
3400 return a / b;
3401}
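/*
 * Note that fpu_set_exception() only records the fault: with the
 * power-on control word 0x037f all exceptions are masked, so a zero
 * divide just returns the IEEE result (+/-inf) with FPUS_ZE latched.
 * If ZM is unmasked, SE|B are also set and the next helper_fwait()
 * delivers the exception via fpu_raise_exception() below.
 */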
3402
3403static void fpu_raise_exception(void)
3404{
3405 if (env->cr[0] & CR0_NE_MASK) {
3406 raise_exception(EXCP10_COPR);
3407 }
3408#if !defined(CONFIG_USER_ONLY)
3409 else {
3410 cpu_set_ferr(env);
3411 }
3412#endif
3413}
3414
3415void helper_flds_FT0(uint32_t val)
3416{
3417 union {
3418 float32 f;
3419 uint32_t i;
3420 } u;
3421 u.i = val;
3422 FT0 = float32_to_floatx(u.f, &env->fp_status);
3423}
3424
3425void helper_fldl_FT0(uint64_t val)
3426{
3427 union {
3428 float64 f;
3429 uint64_t i;
3430 } u;
3431 u.i = val;
3432 FT0 = float64_to_floatx(u.f, &env->fp_status);
3433}
3434
3435void helper_fildl_FT0(int32_t val)
3436{
3437 FT0 = int32_to_floatx(val, &env->fp_status);
3438}
3439
3440void helper_flds_ST0(uint32_t val)
3441{
3442 int new_fpstt;
3443 union {
3444 float32 f;
3445 uint32_t i;
3446 } u;
3447 new_fpstt = (env->fpstt - 1) & 7;
3448 u.i = val;
3449 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3450 env->fpstt = new_fpstt;
3451 env->fptags[new_fpstt] = 0; /* validate stack entry */
3452}
3453
3454void helper_fldl_ST0(uint64_t val)
3455{
3456 int new_fpstt;
3457 union {
3458 float64 f;
3459 uint64_t i;
3460 } u;
3461 new_fpstt = (env->fpstt - 1) & 7;
3462 u.i = val;
3463 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3464 env->fpstt = new_fpstt;
3465 env->fptags[new_fpstt] = 0; /* validate stack entry */
3466}
3467
3468void helper_fildl_ST0(int32_t val)
3469{
3470 int new_fpstt;
3471 new_fpstt = (env->fpstt - 1) & 7;
3472 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3473 env->fpstt = new_fpstt;
3474 env->fptags[new_fpstt] = 0; /* validate stack entry */
3475}
3476
3477void helper_fildll_ST0(int64_t val)
3478{
3479 int new_fpstt;
3480 new_fpstt = (env->fpstt - 1) & 7;
3481 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3482 env->fpstt = new_fpstt;
3483 env->fptags[new_fpstt] = 0; /* validate stack entry */
3484}
3485
3486uint32_t helper_fsts_ST0(void)
3487{
3488 union {
3489 float32 f;
3490 uint32_t i;
3491 } u;
3492 u.f = floatx_to_float32(ST0, &env->fp_status);
3493 return u.i;
3494}
3495
3496uint64_t helper_fstl_ST0(void)
3497{
3498 union {
3499 float64 f;
3500 uint64_t i;
3501 } u;
3502 u.f = floatx_to_float64(ST0, &env->fp_status);
3503 return u.i;
3504}
3505
3506int32_t helper_fist_ST0(void)
3507{
3508 int32_t val;
3509 val = floatx_to_int32(ST0, &env->fp_status);
3510 if (val != (int16_t)val)
3511 val = -32768;
3512 return val;
3513}
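/* Out-of-range or unordered values store the x87 "integer
   indefinite": 0x8000 for the 16-bit forms, hence the -32768 above
   (helper_fistt_ST0 below does the same for the truncating form). */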
3514
3515int32_t helper_fistl_ST0(void)
3516{
3517 int32_t val;
3518 val = floatx_to_int32(ST0, &env->fp_status);
3519 return val;
3520}
3521
3522int64_t helper_fistll_ST0(void)
3523{
3524 int64_t val;
3525 val = floatx_to_int64(ST0, &env->fp_status);
3526 return val;
3527}
3528
3529int32_t helper_fistt_ST0(void)
3530{
3531 int32_t val;
3532 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3533 if (val != (int16_t)val)
3534 val = -32768;
3535 return val;
3536}
3537
3538int32_t helper_fisttl_ST0(void)
3539{
3540 int32_t val;
3541 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3542 return val;
3543}
3544
3545int64_t helper_fisttll_ST0(void)
3546{
3547 int64_t val;
3548 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3549 return val;
3550}
3551
3552void helper_fldt_ST0(target_ulong ptr)
3553{
3554 int new_fpstt;
3555 new_fpstt = (env->fpstt - 1) & 7;
3556 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3557 env->fpstt = new_fpstt;
3558 env->fptags[new_fpstt] = 0; /* validate stack entry */
3559}
3560
3561void helper_fstt_ST0(target_ulong ptr)
3562{
3563 helper_fstt(ST0, ptr);
3564}
3565
3566void helper_fpush(void)
3567{
3568 fpush();
3569}
3570
3571void helper_fpop(void)
3572{
3573 fpop();
3574}
3575
3576void helper_fdecstp(void)
3577{
3578 env->fpstt = (env->fpstt - 1) & 7;
3579 env->fpus &= (~0x4700);
3580}
3581
3582void helper_fincstp(void)
3583{
3584 env->fpstt = (env->fpstt + 1) & 7;
3585 env->fpus &= (~0x4700);
3586}
3587
3588/* FPU move */
3589
3590void helper_ffree_STN(int st_index)
3591{
3592 env->fptags[(env->fpstt + st_index) & 7] = 1;
3593}
3594
3595void helper_fmov_ST0_FT0(void)
3596{
3597 ST0 = FT0;
3598}
3599
3600void helper_fmov_FT0_STN(int st_index)
3601{
3602 FT0 = ST(st_index);
3603}
3604
3605void helper_fmov_ST0_STN(int st_index)
3606{
3607 ST0 = ST(st_index);
3608}
3609
3610void helper_fmov_STN_ST0(int st_index)
3611{
3612 ST(st_index) = ST0;
3613}
3614
3615void helper_fxchg_ST0_STN(int st_index)
3616{
3617 CPU86_LDouble tmp;
3618 tmp = ST(st_index);
3619 ST(st_index) = ST0;
3620 ST0 = tmp;
3621}
3622
3623/* FPU operations */
3624
3625static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3626
3627void helper_fcom_ST0_FT0(void)
3628{
3629 int ret;
3630
3631 ret = floatx_compare(ST0, FT0, &env->fp_status);
3632 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3633}
3634
3635void helper_fucom_ST0_FT0(void)
3636{
3637 int ret;
3638
3639 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3640 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3641}
3642
3643static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3644
3645void helper_fcomi_ST0_FT0(void)
3646{
3647 int eflags;
3648 int ret;
3649
3650 ret = floatx_compare(ST0, FT0, &env->fp_status);
3651 eflags = helper_cc_compute_all(CC_OP);
3652 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3653 CC_SRC = eflags;
3654}
3655
3656void helper_fucomi_ST0_FT0(void)
3657{
3658 int eflags;
3659 int ret;
3660
3661 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3662 eflags = helper_cc_compute_all(CC_OP);
3663 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3664 CC_SRC = eflags;
3665}
3666
3667void helper_fadd_ST0_FT0(void)
3668{
3669 ST0 += FT0;
3670}
3671
3672void helper_fmul_ST0_FT0(void)
3673{
3674 ST0 *= FT0;
3675}
3676
3677void helper_fsub_ST0_FT0(void)
3678{
3679 ST0 -= FT0;
3680}
3681
3682void helper_fsubr_ST0_FT0(void)
3683{
3684 ST0 = FT0 - ST0;
3685}
3686
3687void helper_fdiv_ST0_FT0(void)
3688{
3689 ST0 = helper_fdiv(ST0, FT0);
3690}
3691
3692void helper_fdivr_ST0_FT0(void)
3693{
3694 ST0 = helper_fdiv(FT0, ST0);
3695}
3696
3697/* fp operations between STN and ST0 */
3698
3699void helper_fadd_STN_ST0(int st_index)
3700{
3701 ST(st_index) += ST0;
3702}
3703
3704void helper_fmul_STN_ST0(int st_index)
3705{
3706 ST(st_index) *= ST0;
3707}
3708
3709void helper_fsub_STN_ST0(int st_index)
3710{
3711 ST(st_index) -= ST0;
3712}
3713
3714void helper_fsubr_STN_ST0(int st_index)
3715{
3716 CPU86_LDouble *p;
3717 p = &ST(st_index);
3718 *p = ST0 - *p;
3719}
3720
3721void helper_fdiv_STN_ST0(int st_index)
3722{
3723 CPU86_LDouble *p;
3724 p = &ST(st_index);
3725 *p = helper_fdiv(*p, ST0);
3726}
3727
3728void helper_fdivr_STN_ST0(int st_index)
3729{
3730 CPU86_LDouble *p;
3731 p = &ST(st_index);
3732 *p = helper_fdiv(ST0, *p);
3733}
3734
3735/* misc FPU operations */
3736void helper_fchs_ST0(void)
3737{
3738 ST0 = floatx_chs(ST0);
3739}
3740
3741void helper_fabs_ST0(void)
3742{
3743 ST0 = floatx_abs(ST0);
3744}
3745
3746void helper_fld1_ST0(void)
3747{
3748 ST0 = f15rk[1];
3749}
3750
3751void helper_fldl2t_ST0(void)
3752{
3753 ST0 = f15rk[6];
3754}
3755
3756void helper_fldl2e_ST0(void)
3757{
3758 ST0 = f15rk[5];
3759}
3760
3761void helper_fldpi_ST0(void)
3762{
3763 ST0 = f15rk[2];
3764}
3765
3766void helper_fldlg2_ST0(void)
3767{
3768 ST0 = f15rk[3];
3769}
3770
3771void helper_fldln2_ST0(void)
3772{
3773 ST0 = f15rk[4];
3774}
3775
3776void helper_fldz_ST0(void)
3777{
3778 ST0 = f15rk[0];
3779}
3780
3781void helper_fldz_FT0(void)
3782{
3783 FT0 = f15rk[0];
3784}
3785
3786uint32_t helper_fnstsw(void)
3787{
3788 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3789}
3790
3791uint32_t helper_fnstcw(void)
3792{
3793 return env->fpuc;
3794}
3795
3796static void update_fp_status(void)
3797{
3798 int rnd_type;
3799
3800 /* set rounding mode */
3801 switch(env->fpuc & RC_MASK) {
3802 default:
3803 case RC_NEAR:
3804 rnd_type = float_round_nearest_even;
3805 break;
3806 case RC_DOWN:
3807 rnd_type = float_round_down;
3808 break;
3809 case RC_UP:
3810 rnd_type = float_round_up;
3811 break;
3812 case RC_CHOP:
3813 rnd_type = float_round_to_zero;
3814 break;
3815 }
3816 set_float_rounding_mode(rnd_type, &env->fp_status);
3817#ifdef FLOATX80
3818 switch((env->fpuc >> 8) & 3) {
3819 case 0:
3820 rnd_type = 32;
3821 break;
3822 case 2:
3823 rnd_type = 64;
3824 break;
3825 case 3:
3826 default:
3827 rnd_type = 80;
3828 break;
3829 }
3830 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3831#endif
3832}
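/*
 * Example control words as decoded above: 0x037f (the fninit
 * default, see helper_fninit below) selects round-to-nearest,
 * 80-bit precision and all exceptions masked; 0x0f7f is the same
 * but with RC=11 (round toward zero), the mode C compilers
 * traditionally load for float->int casts.
 */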
3833
3834void helper_fldcw(uint32_t val)
3835{
3836 env->fpuc = val;
3837 update_fp_status();
3838}
3839
3840void helper_fclex(void)
3841{
3842 env->fpus &= 0x7f00;
3843}
3844
3845void helper_fwait(void)
3846{
3847 if (env->fpus & FPUS_SE)
3848 fpu_raise_exception();
3849}
3850
3851void helper_fninit(void)
3852{
3853 env->fpus = 0;
3854 env->fpstt = 0;
3855 env->fpuc = 0x37f;
3856 env->fptags[0] = 1;
3857 env->fptags[1] = 1;
3858 env->fptags[2] = 1;
3859 env->fptags[3] = 1;
3860 env->fptags[4] = 1;
3861 env->fptags[5] = 1;
3862 env->fptags[6] = 1;
3863 env->fptags[7] = 1;
3864}
3865
3866/* BCD ops */
3867
3868void helper_fbld_ST0(target_ulong ptr)
3869{
3870 CPU86_LDouble tmp;
3871 uint64_t val;
3872 unsigned int v;
3873 int i;
3874
3875 val = 0;
3876 for(i = 8; i >= 0; i--) {
3877 v = ldub(ptr + i);
3878 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3879 }
3880 tmp = val;
3881 if (ldub(ptr + 9) & 0x80)
3882 tmp = -tmp;
3883 fpush();
3884 ST0 = tmp;
3885}
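/*
 * Packed-BCD layout consumed above, as a worked example: for the
 * ten-byte operand 78 56 34 12 00 00 00 00 00 00 (byte 0 first) the
 * loop accumulates val = 12345678; bit 7 of byte 9 would negate it.
 */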
3886
3887void helper_fbst_ST0(target_ulong ptr)
3888{
3889 int v;
3890 target_ulong mem_ref, mem_end;
3891 int64_t val;
3892
3893 val = floatx_to_int64(ST0, &env->fp_status);
3894 mem_ref = ptr;
3895 mem_end = mem_ref + 9;
3896 if (val < 0) {
3897 stb(mem_end, 0x80);
3898 val = -val;
3899 } else {
3900 stb(mem_end, 0x00);
3901 }
3902 while (mem_ref < mem_end) {
3903 if (val == 0)
3904 break;
3905 v = val % 100;
3906 val = val / 100;
3907 v = ((v / 10) << 4) | (v % 10);
3908 stb(mem_ref++, v);
3909 }
3910 while (mem_ref < mem_end) {
3911 stb(mem_ref++, 0);
3912 }
3913}
3914
3915void helper_f2xm1(void)
3916{
3917 ST0 = pow(2.0,ST0) - 1.0;
3918}
3919
3920void helper_fyl2x(void)
3921{
3922 CPU86_LDouble fptemp;
3923
3924 fptemp = ST0;
3925 if (fptemp>0.0){
3926 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3927 ST1 *= fptemp;
3928 fpop();
3929 } else {
3930 env->fpus &= (~0x4700);
3931 env->fpus |= 0x400;
3932 }
3933}
3934
3935void helper_fptan(void)
3936{
3937 CPU86_LDouble fptemp;
3938
3939 fptemp = ST0;
3940 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3941 env->fpus |= 0x400;
3942 } else {
3943 ST0 = tan(fptemp);
3944 fpush();
3945 ST0 = 1.0;
3946 env->fpus &= (~0x400); /* C2 <-- 0 */
3947 /* the above code is for |arg| < 2**52 only */
3948 }
3949}
3950
3951void helper_fpatan(void)
3952{
3953 CPU86_LDouble fptemp, fpsrcop;
3954
3955 fpsrcop = ST1;
3956 fptemp = ST0;
3957 ST1 = atan2(fpsrcop,fptemp);
3958 fpop();
3959}
3960
3961void helper_fxtract(void)
3962{
3963 CPU86_LDoubleU temp;
3964 unsigned int expdif;
3965
3966 temp.d = ST0;
3967 expdif = EXPD(temp) - EXPBIAS;
3968 /* DP exponent bias */
3969 ST0 = expdif;
3970 fpush();
3971 BIASEXPONENT(temp);
3972 ST0 = temp.d;
3973}
3974
3975void helper_fprem1(void)
3976{
3977 CPU86_LDouble dblq, fpsrcop, fptemp;
3978 CPU86_LDoubleU fpsrcop1, fptemp1;
3979 int expdif;
3980 signed long long int q;
3981
3982 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3983 ST0 = 0.0 / 0.0; /* NaN */
3984 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3985 return;
3986 }
3987
3988 fpsrcop = ST0;
3989 fptemp = ST1;
3990 fpsrcop1.d = fpsrcop;
3991 fptemp1.d = fptemp;
3992 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3993
3994 if (expdif < 0) {
3995 /* optimisation? taken from the AMD docs */
3996 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3997 /* ST0 is unchanged */
3998 return;
3999 }
4000
4001 if (expdif < 53) {
4002 dblq = fpsrcop / fptemp;
4003 /* round dblq towards nearest integer */
4004 dblq = rint(dblq);
4005 ST0 = fpsrcop - fptemp * dblq;
4006
4007 /* convert dblq to q by truncating towards zero */
4008 if (dblq < 0.0)
4009 q = (signed long long int)(-dblq);
4010 else
4011 q = (signed long long int)dblq;
4012
4013 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4014 /* (C0,C3,C1) <-- (q2,q1,q0) */
4015 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4016 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4017 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4018 } else {
4019 env->fpus |= 0x400; /* C2 <-- 1 */
4020 fptemp = pow(2.0, expdif - 50);
4021 fpsrcop = (ST0 / ST1) / fptemp;
4022 /* fpsrcop = integer obtained by chopping */
4023 fpsrcop = (fpsrcop < 0.0) ?
4024 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4025 ST0 -= (ST1 * fpsrcop * fptemp);
4026 }
4027}
4028
4029void helper_fprem(void)
4030{
4031 CPU86_LDouble dblq, fpsrcop, fptemp;
4032 CPU86_LDoubleU fpsrcop1, fptemp1;
4033 int expdif;
4034 signed long long int q;
4035
4036 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4037 ST0 = 0.0 / 0.0; /* NaN */
4038 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4039 return;
4040 }
4041
4042 fpsrcop = (CPU86_LDouble)ST0;
4043 fptemp = (CPU86_LDouble)ST1;
4044 fpsrcop1.d = fpsrcop;
4045 fptemp1.d = fptemp;
4046 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4047
4048 if (expdif < 0) {
4049 /* optimisation? taken from the AMD docs */
4050 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4051 /* ST0 is unchanged */
4052 return;
4053 }
4054
4055 if ( expdif < 53 ) {
4056 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4057 /* round dblq towards zero */
4058 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4059 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4060
4061 /* convert dblq to q by truncating towards zero */
4062 if (dblq < 0.0)
4063 q = (signed long long int)(-dblq);
4064 else
4065 q = (signed long long int)dblq;
4066
4067 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4068 /* (C0,C3,C1) <-- (q2,q1,q0) */
4069 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4070 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4071 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4072 } else {
4073 int N = 32 + (expdif % 32); /* as per AMD docs */
4074 env->fpus |= 0x400; /* C2 <-- 1 */
4075 fptemp = pow(2.0, (double)(expdif - N));
4076 fpsrcop = (ST0 / ST1) / fptemp;
4077 /* fpsrcop = integer obtained by chopping */
4078 fpsrcop = (fpsrcop < 0.0) ?
4079 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4080 ST0 -= (ST1 * fpsrcop * fptemp);
4081 }
4082}
4083
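/* FYL2XP1: ST1 <- ST1 * log2(ST0 + 1.0), then pop.  Only valid for
   ST0 + 1.0 > 0; otherwise C2 is set and the stack is left untouched. */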
4084void helper_fyl2xp1(void)
4085{
4086 CPU86_LDouble fptemp;
4087
4088 fptemp = ST0;
4089 if ((fptemp+1.0)>0.0) {
4090 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4091 ST1 *= fptemp;
4092 fpop();
4093 } else {
4094 env->fpus &= (~0x4700);
4095 env->fpus |= 0x400;
4096 }
4097}
4098
4099void helper_fsqrt(void)
4100{
4101 CPU86_LDouble fptemp;
4102
4103 fptemp = ST0;
4104 if (fptemp<0.0) {
4105 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4106 env->fpus |= 0x400;
4107 }
4108 ST0 = sqrt(fptemp);
4109}
4110
4111void helper_fsincos(void)
4112{
4113 CPU86_LDouble fptemp;
4114
4115 fptemp = ST0;
4116 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4117 env->fpus |= 0x400;
4118 } else {
4119 ST0 = sin(fptemp);
4120 fpush();
4121 ST0 = cos(fptemp);
4122 env->fpus &= (~0x400); /* C2 <-- 0 */
4123 /* the above code is for |arg| < 2**63 only */
4124 }
4125}
4126
4127void helper_frndint(void)
4128{
4129 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4130}
4131
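/* FSCALE: ST0 <- ST0 * 2^trunc(ST1).  The (int) cast truncates ST1
   towards zero, matching the hardware's chopping of the scale factor. */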
4132void helper_fscale(void)
4133{
4134 ST0 = ldexp (ST0, (int)(ST1));
4135}
4136
4137void helper_fsin(void)
4138{
4139 CPU86_LDouble fptemp;
4140
4141 fptemp = ST0;
4142 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4143 env->fpus |= 0x400;
4144 } else {
4145 ST0 = sin(fptemp);
4146 env->fpus &= (~0x400); /* C2 <-- 0 */
4147 /* the above code is for |arg| < 2**53 only */
4148 }
4149}
4150
4151void helper_fcos(void)
4152{
4153 CPU86_LDouble fptemp;
4154
4155 fptemp = ST0;
4156 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4157 env->fpus |= 0x400;
4158 } else {
4159 ST0 = cos(fptemp);
4160 env->fpus &= (~0x400); /* C2 <-- 0 */
 4161 /* the above code is for |arg| < 2**63 only */
4162 }
4163}
4164
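/* FXAM classifies ST0 through C3/C2/C0: NaN sets C0 (0x100), infinity
   C2|C0 (0x500), zero C3 (0x4000), denormal C3|C2 (0x4400) and a
   normal finite value C2 (0x400); C1 (0x200) reflects the sign bit. */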
4165void helper_fxam_ST0(void)
4166{
4167 CPU86_LDoubleU temp;
4168 int expdif;
4169
4170 temp.d = ST0;
4171
4172 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4173 if (SIGND(temp))
4174 env->fpus |= 0x200; /* C1 <-- 1 */
4175
4176 /* XXX: test fptags too */
4177 expdif = EXPD(temp);
4178 if (expdif == MAXEXPD) {
4179#ifdef USE_X86LDOUBLE
4180 if (MANTD(temp) == 0x8000000000000000ULL)
4181#else
4182 if (MANTD(temp) == 0)
4183#endif
4184 env->fpus |= 0x500 /*Infinity*/;
4185 else
4186 env->fpus |= 0x100 /*NaN*/;
4187 } else if (expdif == 0) {
4188 if (MANTD(temp) == 0)
4189 env->fpus |= 0x4000 /*Zero*/;
4190 else
4191 env->fpus |= 0x4400 /*Denormal*/;
4192 } else {
4193 env->fpus |= 0x400;
4194 }
4195}
4196
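/* FSTENV/FLDENV use the 14-byte (16 bit) or 28-byte (32 bit)
   environment image.  The tag word holds two bits per register:
   00 valid, 01 zero, 10 special (NaN/infinity/denormal), 11 empty,
   which is what the loop below derives from fptags[] and the
   register contents. */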
4197void helper_fstenv(target_ulong ptr, int data32)
4198{
4199 int fpus, fptag, exp, i;
4200 uint64_t mant;
4201 CPU86_LDoubleU tmp;
4202
4203 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4204 fptag = 0;
4205 for (i=7; i>=0; i--) {
4206 fptag <<= 2;
4207 if (env->fptags[i]) {
4208 fptag |= 3;
4209 } else {
4210 tmp.d = env->fpregs[i].d;
4211 exp = EXPD(tmp);
4212 mant = MANTD(tmp);
4213 if (exp == 0 && mant == 0) {
4214 /* zero */
4215 fptag |= 1;
4216 } else if (exp == 0 || exp == MAXEXPD
4217#ifdef USE_X86LDOUBLE
4218 || (mant & (1LL << 63)) == 0
4219#endif
4220 ) {
4221 /* NaNs, infinity, denormal */
4222 fptag |= 2;
4223 }
4224 }
4225 }
4226 if (data32) {
4227 /* 32 bit */
4228 stl(ptr, env->fpuc);
4229 stl(ptr + 4, fpus);
4230 stl(ptr + 8, fptag);
4231 stl(ptr + 12, 0); /* fpip */
4232 stl(ptr + 16, 0); /* fpcs */
4233 stl(ptr + 20, 0); /* fpoo */
4234 stl(ptr + 24, 0); /* fpos */
4235 } else {
4236 /* 16 bit */
4237 stw(ptr, env->fpuc);
4238 stw(ptr + 2, fpus);
4239 stw(ptr + 4, fptag);
4240 stw(ptr + 6, 0);
4241 stw(ptr + 8, 0);
4242 stw(ptr + 10, 0);
4243 stw(ptr + 12, 0);
4244 }
4245}
4246
4247void helper_fldenv(target_ulong ptr, int data32)
4248{
4249 int i, fpus, fptag;
4250
4251 if (data32) {
4252 env->fpuc = lduw(ptr);
4253 fpus = lduw(ptr + 4);
4254 fptag = lduw(ptr + 8);
4255 }
4256 else {
4257 env->fpuc = lduw(ptr);
4258 fpus = lduw(ptr + 2);
4259 fptag = lduw(ptr + 4);
4260 }
4261 env->fpstt = (fpus >> 11) & 7;
4262 env->fpus = fpus & ~0x3800;
4263 for(i = 0;i < 8; i++) {
4264 env->fptags[i] = ((fptag & 3) == 3);
4265 fptag >>= 2;
4266 }
4267}
4268
4269void helper_fsave(target_ulong ptr, int data32)
4270{
4271 CPU86_LDouble tmp;
4272 int i;
4273
4274 helper_fstenv(ptr, data32);
4275
4276 ptr += (14 << data32);
4277 for(i = 0;i < 8; i++) {
4278 tmp = ST(i);
4279 helper_fstt(tmp, ptr);
4280 ptr += 10;
4281 }
4282
4283 /* fninit */
4284 env->fpus = 0;
4285 env->fpstt = 0;
4286 env->fpuc = 0x37f;
4287 env->fptags[0] = 1;
4288 env->fptags[1] = 1;
4289 env->fptags[2] = 1;
4290 env->fptags[3] = 1;
4291 env->fptags[4] = 1;
4292 env->fptags[5] = 1;
4293 env->fptags[6] = 1;
4294 env->fptags[7] = 1;
4295}
4296
4297void helper_frstor(target_ulong ptr, int data32)
4298{
4299 CPU86_LDouble tmp;
4300 int i;
4301
4302 helper_fldenv(ptr, data32);
4303 ptr += (14 << data32);
4304
4305 for(i = 0;i < 8; i++) {
4306 tmp = helper_fldt(ptr);
4307 ST(i) = tmp;
4308 ptr += 10;
4309 }
4310}
4311
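/* FXSAVE image (512 bytes); only the fields implemented here are
   written: FCW at +0x00, FSW at +0x02, the abridged tag word at +0x04
   (one "valid" bit per register, hence the ^ 0xff), the x87 registers
   in 16-byte slots from +0x20, and, when CR4.OSFXSR is set, MXCSR at
   +0x18 plus the XMM registers from +0xa0. */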
4312void helper_fxsave(target_ulong ptr, int data64)
4313{
4314 int fpus, fptag, i, nb_xmm_regs;
4315 CPU86_LDouble tmp;
4316 target_ulong addr;
4317
4318 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4319 fptag = 0;
4320 for(i = 0; i < 8; i++) {
4321 fptag |= (env->fptags[i] << i);
4322 }
4323 stw(ptr, env->fpuc);
4324 stw(ptr + 2, fpus);
4325 stw(ptr + 4, fptag ^ 0xff);
4326#ifdef TARGET_X86_64
4327 if (data64) {
4328 stq(ptr + 0x08, 0); /* rip */
4329 stq(ptr + 0x10, 0); /* rdp */
4330 } else
4331#endif
4332 {
4333 stl(ptr + 0x08, 0); /* eip */
4334 stl(ptr + 0x0c, 0); /* sel */
4335 stl(ptr + 0x10, 0); /* dp */
4336 stl(ptr + 0x14, 0); /* sel */
4337 }
4338
4339 addr = ptr + 0x20;
4340 for(i = 0;i < 8; i++) {
4341 tmp = ST(i);
4342 helper_fstt(tmp, addr);
4343 addr += 16;
4344 }
4345
4346 if (env->cr[4] & CR4_OSFXSR_MASK) {
4347 /* XXX: finish it */
4348 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4349 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4350 if (env->hflags & HF_CS64_MASK)
4351 nb_xmm_regs = 16;
4352 else
4353 nb_xmm_regs = 8;
4354 addr = ptr + 0xa0;
4355 for(i = 0; i < nb_xmm_regs; i++) {
4356 stq(addr, env->xmm_regs[i].XMM_Q(0));
4357 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4358 addr += 16;
4359 }
4360 }
4361}
4362
4363void helper_fxrstor(target_ulong ptr, int data64)
4364{
4365 int i, fpus, fptag, nb_xmm_regs;
4366 CPU86_LDouble tmp;
4367 target_ulong addr;
4368
4369 env->fpuc = lduw(ptr);
4370 fpus = lduw(ptr + 2);
4371 fptag = lduw(ptr + 4);
4372 env->fpstt = (fpus >> 11) & 7;
4373 env->fpus = fpus & ~0x3800;
4374 fptag ^= 0xff;
4375 for(i = 0;i < 8; i++) {
4376 env->fptags[i] = ((fptag >> i) & 1);
4377 }
4378
4379 addr = ptr + 0x20;
4380 for(i = 0;i < 8; i++) {
4381 tmp = helper_fldt(addr);
4382 ST(i) = tmp;
4383 addr += 16;
4384 }
4385
4386 if (env->cr[4] & CR4_OSFXSR_MASK) {
4387 /* XXX: finish it */
4388 env->mxcsr = ldl(ptr + 0x18);
4389 //ldl(ptr + 0x1c);
4390 if (env->hflags & HF_CS64_MASK)
4391 nb_xmm_regs = 16;
4392 else
4393 nb_xmm_regs = 8;
4394 addr = ptr + 0xa0;
4395 for(i = 0; i < nb_xmm_regs; i++) {
4396 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4397 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4398 addr += 16;
4399 }
4400 }
4401}
4402
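/* Conversion between the x87 80-bit format (64-bit mantissa with an
   explicit integer bit, exponent bias 16383) and the host
   representation.  Without a true long double only 52 mantissa bits
   survive the round trip through the IEEE double layout below. */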
4403#ifndef USE_X86LDOUBLE
4404
4405void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4406{
4407 CPU86_LDoubleU temp;
4408 int e;
4409
4410 temp.d = f;
4411 /* mantissa */
4412 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4413 /* exponent + sign */
4414 e = EXPD(temp) - EXPBIAS + 16383;
4415 e |= SIGND(temp) >> 16;
4416 *pexp = e;
4417}
4418
4419CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4420{
4421 CPU86_LDoubleU temp;
4422 int e;
4423 uint64_t ll;
4424
4425 /* XXX: handle overflow ? */
4426 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4427 e |= (upper >> 4) & 0x800; /* sign */
4428 ll = (mant >> 11) & ((1LL << 52) - 1);
4429#ifdef __arm__
4430 temp.l.upper = (e << 20) | (ll >> 32);
4431 temp.l.lower = ll;
4432#else
4433 temp.ll = ll | ((uint64_t)e << 52);
4434#endif
4435 return temp.d;
4436}
4437
4438#else
4439
4440void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4441{
4442 CPU86_LDoubleU temp;
4443
4444 temp.d = f;
4445 *pmant = temp.l.lower;
4446 *pexp = temp.l.upper;
4447}
4448
4449CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4450{
4451 CPU86_LDoubleU temp;
4452
4453 temp.l.upper = upper;
4454 temp.l.lower = mant;
4455 return temp.d;
4456}
4457#endif
4458
4459#ifdef TARGET_X86_64
4460
4461//#define DEBUG_MULDIV
4462
4463static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4464{
4465 *plow += a;
4466 /* carry test */
4467 if (*plow < a)
4468 (*phigh)++;
4469 *phigh += b;
4470}
4471
4472static void neg128(uint64_t *plow, uint64_t *phigh)
4473{
4474 *plow = ~ *plow;
4475 *phigh = ~ *phigh;
4476 add128(plow, phigh, 1, 0);
4477}
4478
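/* 128/64 -> 64 bit unsigned division by restoring shift-subtract:
   each of the 64 iterations shifts one dividend bit into the partial
   remainder a1, subtracts b when possible, and shifts the resulting
   quotient bit into a0.  The quotient overflows 64 bits exactly when
   a1 >= b on entry. */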
4479/* return TRUE if overflow */
4480static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4481{
4482 uint64_t q, r, a1, a0;
4483 int i, qb, ab;
4484
4485 a0 = *plow;
4486 a1 = *phigh;
4487 if (a1 == 0) {
4488 q = a0 / b;
4489 r = a0 % b;
4490 *plow = q;
4491 *phigh = r;
4492 } else {
4493 if (a1 >= b)
4494 return 1;
4495 /* XXX: use a better algorithm */
4496 for(i = 0; i < 64; i++) {
4497 ab = a1 >> 63;
4498 a1 = (a1 << 1) | (a0 >> 63);
4499 if (ab || a1 >= b) {
4500 a1 -= b;
4501 qb = 1;
4502 } else {
4503 qb = 0;
4504 }
4505 a0 = (a0 << 1) | qb;
4506 }
4507#if defined(DEBUG_MULDIV)
4508 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4509 *phigh, *plow, b, a0, a1);
4510#endif
4511 *plow = a0;
4512 *phigh = a1;
4513 }
4514 return 0;
4515}
4516
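/* Signed division is reduced to the unsigned case: divide the
   magnitudes with div64(), then negate quotient and/or remainder
   according to the operand signs, rejecting any quotient that does
   not fit in int64_t. */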
4517/* return TRUE if overflow */
4518static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4519{
4520 int sa, sb;
4521 sa = ((int64_t)*phigh < 0);
4522 if (sa)
4523 neg128(plow, phigh);
4524 sb = (b < 0);
4525 if (sb)
4526 b = -b;
4527 if (div64(plow, phigh, b) != 0)
4528 return 1;
4529 if (sa ^ sb) {
4530 if (*plow > (1ULL << 63))
4531 return 1;
4532 *plow = - *plow;
4533 } else {
4534 if (*plow >= (1ULL << 63))
4535 return 1;
4536 }
4537 if (sa)
4538 *phigh = - *phigh;
4539 return 0;
4540}
4541
4542void helper_mulq_EAX_T0(target_ulong t0)
4543{
4544 uint64_t r0, r1;
4545
4546 mulu64(&r0, &r1, EAX, t0);
4547 EAX = r0;
4548 EDX = r1;
4549 CC_DST = r0;
4550 CC_SRC = r1;
4551}
4552
4553void helper_imulq_EAX_T0(target_ulong t0)
4554{
4555 uint64_t r0, r1;
4556
4557 muls64(&r0, &r1, EAX, t0);
4558 EAX = r0;
4559 EDX = r1;
4560 CC_DST = r0;
4561 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4562}
4563
4564target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4565{
4566 uint64_t r0, r1;
4567
4568 muls64(&r0, &r1, t0, t1);
4569 CC_DST = r0;
4570 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4571 return r0;
4572}
4573
4574void helper_divq_EAX(target_ulong t0)
4575{
4576 uint64_t r0, r1;
4577 if (t0 == 0) {
4578 raise_exception(EXCP00_DIVZ);
4579 }
4580 r0 = EAX;
4581 r1 = EDX;
4582 if (div64(&r0, &r1, t0))
4583 raise_exception(EXCP00_DIVZ);
4584 EAX = r0;
4585 EDX = r1;
4586}
4587
4588void helper_idivq_EAX(target_ulong t0)
4589{
4590 uint64_t r0, r1;
4591 if (t0 == 0) {
4592 raise_exception(EXCP00_DIVZ);
4593 }
4594 r0 = EAX;
4595 r1 = EDX;
4596 if (idiv64(&r0, &r1, t0))
4597 raise_exception(EXCP00_DIVZ);
4598 EAX = r0;
4599 EDX = r1;
4600}
4601#endif
4602
 4603static void do_hlt(void)
4604{
4605 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
 4606 env->halted = 1;
4607 env->exception_index = EXCP_HLT;
4608 cpu_loop_exit();
4609}
4610
4611void helper_hlt(int next_eip_addend)
4612{
4613 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4614 EIP += next_eip_addend;
4615
4616 do_hlt();
4617}
4618
4619void helper_monitor(target_ulong ptr)
4620{
4621 if ((uint32_t)ECX != 0)
4622 raise_exception(EXCP0D_GPF);
4623 /* XXX: store address ? */
 4624 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4625}
4626
 4627void helper_mwait(int next_eip_addend)
4628{
4629 if ((uint32_t)ECX != 0)
4630 raise_exception(EXCP0D_GPF);
 4631 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4632 EIP += next_eip_addend;
4633
4634 /* XXX: not complete but not completely erroneous */
4635 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4636 /* more than one CPU: do not sleep because another CPU may
4637 wake this one */
4638 } else {
 4639 do_hlt();
4640 }
4641}
4642
4643void helper_debug(void)
4644{
4645 env->exception_index = EXCP_DEBUG;
4646 cpu_loop_exit();
4647}
4648
4649void helper_raise_interrupt(int intno, int next_eip_addend)
4650{
4651 raise_interrupt(intno, 1, 0, next_eip_addend);
4652}
4653
4654void helper_raise_exception(int exception_index)
4655{
4656 raise_exception(exception_index);
4657}
4658
4659void helper_cli(void)
4660{
4661 env->eflags &= ~IF_MASK;
4662}
4663
4664void helper_sti(void)
4665{
4666 env->eflags |= IF_MASK;
4667}
4668
4669#if 0
4670/* vm86plus instructions */
4671void helper_cli_vm(void)
4672{
4673 env->eflags &= ~VIF_MASK;
4674}
4675
4676void helper_sti_vm(void)
4677{
4678 env->eflags |= VIF_MASK;
4679 if (env->eflags & VIP_MASK) {
4680 raise_exception(EXCP0D_GPF);
4681 }
4682}
4683#endif
4684
4685void helper_set_inhibit_irq(void)
4686{
4687 env->hflags |= HF_INHIBIT_IRQ_MASK;
4688}
4689
4690void helper_reset_inhibit_irq(void)
4691{
4692 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4693}
4694
4695void helper_boundw(target_ulong a0, int v)
4696{
4697 int low, high;
4698 low = ldsw(a0);
4699 high = ldsw(a0 + 2);
4700 v = (int16_t)v;
4701 if (v < low || v > high) {
4702 raise_exception(EXCP05_BOUND);
4703 }
4704}
4705
4706void helper_boundl(target_ulong a0, int v)
4707{
4708 int low, high;
4709 low = ldl(a0);
4710 high = ldl(a0 + 4);
4711 if (v < low || v > high) {
4712 raise_exception(EXCP05_BOUND);
4713 }
4714}
4715
4716static float approx_rsqrt(float a)
4717{
4718 return 1.0 / sqrt(a);
4719}
4720
4721static float approx_rcp(float a)
4722{
4723 return 1.0 / a;
4724}
4725
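/* Instantiate the softmmu load/store helpers for 1, 2, 4 and 8 byte
   accesses: softmmu_template.h expands into a different helper set
   for each SHIFT value (access size = 1 << SHIFT). */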
4726#if !defined(CONFIG_USER_ONLY)
4727
4728#define MMUSUFFIX _mmu
4729
4730#define SHIFT 0
4731#include "softmmu_template.h"
4732
4733#define SHIFT 1
4734#include "softmmu_template.h"
4735
4736#define SHIFT 2
4737#include "softmmu_template.h"
4738
4739#define SHIFT 3
4740#include "softmmu_template.h"
4741
4742#endif
4743
 4744#if !defined(CONFIG_USER_ONLY)
 4745/* try to fill the TLB and raise an exception on error. If retaddr is
4746 NULL, it means that the function was called in C code (i.e. not
4747 from generated code or from helper.c) */
4748/* XXX: fix it to restore all registers */
4749void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4750{
4751 TranslationBlock *tb;
4752 int ret;
4753 unsigned long pc;
4754 CPUX86State *saved_env;
4755
4756 /* XXX: hack to restore env in all cases, even if not called from
4757 generated code */
4758 saved_env = env;
4759 env = cpu_single_env;
4760
4761 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4762 if (ret) {
4763 if (retaddr) {
4764 /* now we have a real cpu fault */
4765 pc = (unsigned long)retaddr;
4766 tb = tb_find_pc(pc);
4767 if (tb) {
4768 /* the PC is inside the translated code. It means that we have
4769 a virtual CPU fault */
4770 cpu_restore_state(tb, env, pc, NULL);
4771 }
4772 }
 4773 raise_exception_err(env->exception_index, env->error_code);
4774 }
4775 env = saved_env;
4776}
 4777#endif
4778
4779/* Secure Virtual Machine helpers */
4780
4781#if defined(CONFIG_USER_ONLY)
4782
 4783void helper_vmrun(int aflag, int next_eip_addend)
4784{
4785}
4786void helper_vmmcall(void)
4787{
4788}
 4789void helper_vmload(int aflag)
4790{
4791}
 4792void helper_vmsave(int aflag)
4793{
4794}
4795void helper_stgi(void)
4796{
4797}
4798void helper_clgi(void)
4799{
4800}
4801void helper_skinit(void)
4802{
4803}
 4804void helper_invlpga(int aflag)
4805{
4806}
4807void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4808{
4809}
4810void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4811{
4812}
4813
4814void helper_svm_check_io(uint32_t port, uint32_t param,
4815 uint32_t next_eip_addend)
4816{
4817}
4818#else
4819
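/* The VMCB stores segment attributes in a packed 12-bit format; the
   two helpers below convert between it and the descriptor flag layout
   kept in SegmentCache.flags (bits 8..15 and 20..23). */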
4820static inline void svm_save_seg(target_phys_addr_t addr,
4821 const SegmentCache *sc)
 4822{
4823 stw_phys(addr + offsetof(struct vmcb_seg, selector),
4824 sc->selector);
4825 stq_phys(addr + offsetof(struct vmcb_seg, base),
4826 sc->base);
4827 stl_phys(addr + offsetof(struct vmcb_seg, limit),
4828 sc->limit);
4829 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
 4830 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4831}
4832
4833static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4834{
4835 unsigned int flags;
4836
4837 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4838 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4839 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4840 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4841 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4842}
4843
4844static inline void svm_load_seg_cache(target_phys_addr_t addr,
4845 CPUState *env, int seg_reg)
 4846{
4847 SegmentCache sc1, *sc = &sc1;
4848 svm_load_seg(addr, sc);
4849 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4850 sc->base, sc->limit, sc->flags);
4851}
4852
 4853void helper_vmrun(int aflag, int next_eip_addend)
4854{
4855 target_ulong addr;
4856 uint32_t event_inj;
4857 uint32_t int_ctl;
4858
4859 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4860
4861 if (aflag == 2)
4862 addr = EAX;
4863 else
4864 addr = (uint32_t)EAX;
4865
 4866 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4867
4868 env->vm_vmcb = addr;
4869
4870 /* save the current CPU state in the hsave page */
4871 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4872 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4873
4874 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4875 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4876
4877 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4878 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4879 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4880 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4881 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4882 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4883
4884 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4885 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4886
4887 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4888 &env->segs[R_ES]);
4889 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4890 &env->segs[R_CS]);
4891 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4892 &env->segs[R_SS]);
4893 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4894 &env->segs[R_DS]);
 4895
4896 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4897 EIP + next_eip_addend);
4898 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4899 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4900
4901 /* load the interception bitmaps so we do not need to access the
4902 vmcb in svm mode */
 4903 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4904 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4905 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4906 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4907 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4908 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4909
4910 /* enable intercepts */
4911 env->hflags |= HF_SVMI_MASK;
4912
4913 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4914
4915 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4916 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4917
4918 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4919 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4920
4921 /* clear exit_info_2 so we behave like the real hardware */
4922 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4923
4924 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4925 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4926 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4927 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4928 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
 4929 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
 4930 if (int_ctl & V_INTR_MASKING_MASK) {
4931 env->v_tpr = int_ctl & V_TPR_MASK;
4932 env->hflags2 |= HF2_VINTR_MASK;
 4933 if (env->eflags & IF_MASK)
 4934 env->hflags2 |= HF2_HIF_MASK;
4935 }
4936
4937 cpu_load_efer(env,
4938 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4939 env->eflags = 0;
4940 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4941 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4942 CC_OP = CC_OP_EFLAGS;
 4943
4944 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4945 env, R_ES);
4946 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4947 env, R_CS);
4948 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4949 env, R_SS);
4950 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4951 env, R_DS);
4952
4953 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4954 env->eip = EIP;
4955 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4956 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4957 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4958 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4959 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4960
4961 /* FIXME: guest state consistency checks */
4962
4963 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4964 case TLB_CONTROL_DO_NOTHING:
4965 break;
4966 case TLB_CONTROL_FLUSH_ALL_ASID:
4967 /* FIXME: this is not 100% correct but should work for now */
4968 tlb_flush(env, 1);
4969 break;
4970 }
4971
 4972 env->hflags2 |= HF2_GIF_MASK;
 4973
4974 if (int_ctl & V_IRQ_MASK) {
4975 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4976 }
4977
4978 /* maybe we need to inject an event */
4979 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
4980 if (event_inj & SVM_EVTINJ_VALID) {
4981 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4982 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4983 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
4984 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
4985
 4986 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
4987 /* FIXME: need to implement valid_err */
4988 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
4989 case SVM_EVTINJ_TYPE_INTR:
4990 env->exception_index = vector;
4991 env->error_code = event_inj_err;
4992 env->exception_is_int = 0;
4993 env->exception_next_eip = -1;
 4994 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
4995 /* XXX: is it always correct ? */
4996 do_interrupt(vector, 0, 0, 0, 1);
4997 break;
4998 case SVM_EVTINJ_TYPE_NMI:
 4999 env->exception_index = EXCP02_NMI;
5000 env->error_code = event_inj_err;
5001 env->exception_is_int = 0;
5002 env->exception_next_eip = EIP;
 5003 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
 5004 cpu_loop_exit();
5005 break;
5006 case SVM_EVTINJ_TYPE_EXEPT:
5007 env->exception_index = vector;
5008 env->error_code = event_inj_err;
5009 env->exception_is_int = 0;
5010 env->exception_next_eip = -1;
 5011 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
 5012 cpu_loop_exit();
5013 break;
5014 case SVM_EVTINJ_TYPE_SOFT:
5015 env->exception_index = vector;
5016 env->error_code = event_inj_err;
5017 env->exception_is_int = 1;
5018 env->exception_next_eip = EIP;
 5019 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
 5020 cpu_loop_exit();
5021 break;
5022 }
 5023 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
 5024 }
5025}
5026
5027void helper_vmmcall(void)
5028{
5029 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5030 raise_exception(EXCP06_ILLOP);
5031}
5032
 5033void helper_vmload(int aflag)
5034{
5035 target_ulong addr;
5036 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5037
5038 if (aflag == 2)
5039 addr = EAX;
5040 else
5041 addr = (uint32_t)EAX;
5042
 5043 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5044 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5045 env->segs[R_FS].base);
5046
5047 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5048 env, R_FS);
5049 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5050 env, R_GS);
5051 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5052 &env->tr);
5053 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5054 &env->ldt);
5055
5056#ifdef TARGET_X86_64
5057 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5058 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5059 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5060 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5061#endif
5062 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5063 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5064 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5065 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5066}
5067
 5068void helper_vmsave(int aflag)
5069{
5070 target_ulong addr;
 5071 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5072
5073 if (aflag == 2)
5074 addr = EAX;
5075 else
5076 addr = (uint32_t)EAX;
5077
 5078 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5079 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5080 env->segs[R_FS].base);
5081
5082 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5083 &env->segs[R_FS]);
5084 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5085 &env->segs[R_GS]);
5086 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5087 &env->tr);
5088 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5089 &env->ldt);
5090
5091#ifdef TARGET_X86_64
5092 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5093 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5094 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5095 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5096#endif
5097 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5098 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5099 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5100 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5101}
5102
5103void helper_stgi(void)
5104{
5105 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
 5106 env->hflags2 |= HF2_GIF_MASK;
5107}
5108
5109void helper_clgi(void)
5110{
5111 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
 5112 env->hflags2 &= ~HF2_GIF_MASK;
5113}
5114
5115void helper_skinit(void)
5116{
5117 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5118 /* XXX: not implemented */
 5119 raise_exception(EXCP06_ILLOP);
5120}
5121
 5122void helper_invlpga(int aflag)
 5123{
 5124 target_ulong addr;
 5125 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5126
5127 if (aflag == 2)
5128 addr = EAX;
5129 else
5130 addr = (uint32_t)EAX;
5131
5132 /* XXX: could use the ASID to see if it is needed to do the
5133 flush */
5134 tlb_flush_page(env, addr);
5135}
5136
5137void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5138{
5139 if (likely(!(env->hflags & HF_SVMI_MASK)))
5140 return;
5141 switch(type) {
5142 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
 5143 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5144 helper_vmexit(type, param);
5145 }
5146 break;
5147 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5148 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5149 helper_vmexit(type, param);
5150 }
5151 break;
5152 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5153 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5154 helper_vmexit(type, param);
5155 }
5156 break;
5157 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5158 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5159 helper_vmexit(type, param);
5160 }
5161 break;
5162 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5163 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5164 helper_vmexit(type, param);
5165 }
5166 break;
 5167 case SVM_EXIT_MSR:
 5168 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5169 /* FIXME: this should be read in at vmrun (faster this way?) */
5170 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5171 uint32_t t0, t1;
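            /* The MSR permission map holds two intercept bits per MSR,
               with the three architectural MSR ranges each mapped to
               their own 2K region of the bitmap; the switch computes
               the byte (t1) and bit (t0) position to test. */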
5172 switch((uint32_t)ECX) {
5173 case 0 ... 0x1fff:
5174 t0 = (ECX * 2) % 8;
5175 t1 = ECX / 8;
5176 break;
5177 case 0xc0000000 ... 0xc0001fff:
5178 t0 = (8192 + ECX - 0xc0000000) * 2;
5179 t1 = (t0 / 8);
5180 t0 %= 8;
5181 break;
5182 case 0xc0010000 ... 0xc0011fff:
5183 t0 = (16384 + ECX - 0xc0010000) * 2;
5184 t1 = (t0 / 8);
5185 t0 %= 8;
5186 break;
5187 default:
5188 helper_vmexit(type, param);
5189 t0 = 0;
5190 t1 = 0;
5191 break;
5192 }
5193 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5194 helper_vmexit(type, param);
5195 }
5196 break;
5197 default:
 5198 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5199 helper_vmexit(type, param);
5200 }
5201 break;
5202 }
5203}
5204
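/* IOIO intercepts are driven by the I/O permission bitmap: one bit
   per port, with a multi-byte access checked by building a mask of
   (access size) bits from param[6:4] and testing it at bit offset
   (port & 7). */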
5205void helper_svm_check_io(uint32_t port, uint32_t param,
5206 uint32_t next_eip_addend)
5207{
 5208 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5209 /* FIXME: this should be read in at vmrun (faster this way?) */
5210 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5211 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5212 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5213 /* next EIP */
5214 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5215 env->eip + next_eip_addend);
5216 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5217 }
5218 }
5219}
5220
5221/* Note: currently only 32 bits of exit_code are used */
5222void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5223{
5224 uint32_t int_ctl;
5225
 5226 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5227 exit_code, exit_info_1,
5228 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5229 EIP);
5230
5231 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5232 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5233 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5234 } else {
5235 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5236 }
5237
5238 /* Save the VM state in the vmcb */
5239 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5240 &env->segs[R_ES]);
5241 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5242 &env->segs[R_CS]);
5243 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5244 &env->segs[R_SS]);
5245 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5246 &env->segs[R_DS]);
5247
5248 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5249 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5250
5251 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5252 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5253
5254 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5255 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5256 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5257 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5258 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5259
5260 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5261 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5262 int_ctl |= env->v_tpr & V_TPR_MASK;
5263 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5264 int_ctl |= V_IRQ_MASK;
5265 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5266
5267 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5268 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5269 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5270 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5271 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5272 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5273 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5274
5275 /* Reload the host state from vm_hsave */
 5276 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
 5277 env->hflags &= ~HF_SVMI_MASK;
5278 env->intercept = 0;
5279 env->intercept_exceptions = 0;
5280 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
 5281 env->tsc_offset = 0;
5282
5283 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5284 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5285
5286 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5287 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5288
5289 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5290 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5291 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5292 /* we need to set the efer after the crs so the hidden flags get
5293 set properly */
5294 cpu_load_efer(env,
5295 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5296 env->eflags = 0;
5297 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5298 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5299 CC_OP = CC_OP_EFLAGS;
5300
5301 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5302 env, R_ES);
5303 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5304 env, R_CS);
5305 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5306 env, R_SS);
5307 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5308 env, R_DS);
5309
5310 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5311 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5312 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5313
5314 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5315 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5316
5317 /* other setups */
5318 cpu_x86_set_cpl(env, 0);
5319 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5320 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5321
 5322 env->hflags2 &= ~HF2_GIF_MASK;
5323 /* FIXME: Resets the current ASID register to zero (host ASID). */
5324
5325 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5326
5327 /* Clears the TSC_OFFSET inside the processor. */
5328
5329 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5330 from the page table indicated the host's CR3. If the PDPEs contain
5331 illegal state, the processor causes a shutdown. */
5332
5333 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5334 env->cr[0] |= CR0_PE_MASK;
5335 env->eflags &= ~VM_MASK;
5336
5337 /* Disables all breakpoints in the host DR7 register. */
5338
5339 /* Checks the reloaded host state for consistency. */
5340
5341 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5342 host's code segment or non-canonical (in the case of long mode), a
 5343 #GP fault is delivered inside the host. */
5344
5345 /* remove any pending exception */
5346 env->exception_index = -1;
5347 env->error_code = 0;
5348 env->old_exception = -1;
5349
5350 cpu_loop_exit();
5351}
5352
5353#endif
5354
5355/* MMX/SSE */
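/* The MMX registers alias the x87 stack: entering MMX mode resets the
   stack top and tags every register valid (tag byte 0), while EMMS
   tags them all empty (tag byte 1), done bytewise below. */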
5356/* XXX: optimize by storing fptt and fptags in the static cpu state */
5357void helper_enter_mmx(void)
5358{
5359 env->fpstt = 0;
5360 *(uint32_t *)(env->fptags) = 0;
5361 *(uint32_t *)(env->fptags + 4) = 0;
5362}
5363
5364void helper_emms(void)
5365{
5366 /* set to empty state */
5367 *(uint32_t *)(env->fptags) = 0x01010101;
5368 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5369}
5370
5371/* XXX: suppress */
 5372void helper_movq(void *d, void *s)
 5373{
 5374 *(uint64_t *)d = *(uint64_t *)s;
5375}
5376
5377#define SHIFT 0
5378#include "ops_sse.h"
5379
5380#define SHIFT 1
5381#include "ops_sse.h"
5382
5383#define SHIFT 0
5384#include "helper_template.h"
5385#undef SHIFT
5386
5387#define SHIFT 1
5388#include "helper_template.h"
5389#undef SHIFT
5390
5391#define SHIFT 2
5392#include "helper_template.h"
5393#undef SHIFT
5394
5395#ifdef TARGET_X86_64
5396
5397#define SHIFT 3
5398#include "helper_template.h"
5399#undef SHIFT
5400
5401#endif
5402
5403/* bit operations */
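/* BSF/BSR scan for the lowest/highest set bit.  Both helpers assume a
   non-zero source: with t0 == 0 neither loop would terminate, so the
   zero case must be handled before calling them. */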
5404target_ulong helper_bsf(target_ulong t0)
5405{
5406 int count;
5407 target_ulong res;
5408
5409 res = t0;
5410 count = 0;
5411 while ((res & 1) == 0) {
5412 count++;
5413 res >>= 1;
5414 }
5415 return count;
5416}
5417
5418target_ulong helper_bsr(target_ulong t0)
5419{
5420 int count;
5421 target_ulong res, mask;
5422
5423 res = t0;
5424 count = TARGET_LONG_BITS - 1;
5425 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5426 while ((res & mask) == 0) {
5427 count--;
5428 res <<= 1;
5429 }
5430 return count;
5431}
5432
5433
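/* Lazy condition codes: rather than computing EFLAGS after every
   instruction, the translator records the last operation in CC_OP and
   its operands in CC_SRC/CC_DST; the dispatchers below then recompute
   either the full flag set or just CF on demand. */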
5434static int compute_all_eflags(void)
5435{
5436 return CC_SRC;
5437}
5438
5439static int compute_c_eflags(void)
5440{
5441 return CC_SRC & CC_C;
5442}
5443
5444uint32_t helper_cc_compute_all(int op)
5445{
5446 switch (op) {
5447 default: /* should never happen */ return 0;
 5448
 5449 case CC_OP_EFLAGS: return compute_all_eflags();
 5450
5451 case CC_OP_MULB: return compute_all_mulb();
5452 case CC_OP_MULW: return compute_all_mulw();
5453 case CC_OP_MULL: return compute_all_mull();
 5454
5455 case CC_OP_ADDB: return compute_all_addb();
5456 case CC_OP_ADDW: return compute_all_addw();
5457 case CC_OP_ADDL: return compute_all_addl();
 5458
5459 case CC_OP_ADCB: return compute_all_adcb();
5460 case CC_OP_ADCW: return compute_all_adcw();
5461 case CC_OP_ADCL: return compute_all_adcl();
 5462
5463 case CC_OP_SUBB: return compute_all_subb();
5464 case CC_OP_SUBW: return compute_all_subw();
5465 case CC_OP_SUBL: return compute_all_subl();
 5466
5467 case CC_OP_SBBB: return compute_all_sbbb();
5468 case CC_OP_SBBW: return compute_all_sbbw();
5469 case CC_OP_SBBL: return compute_all_sbbl();
 5470
5471 case CC_OP_LOGICB: return compute_all_logicb();
5472 case CC_OP_LOGICW: return compute_all_logicw();
5473 case CC_OP_LOGICL: return compute_all_logicl();
 5474
5475 case CC_OP_INCB: return compute_all_incb();
5476 case CC_OP_INCW: return compute_all_incw();
5477 case CC_OP_INCL: return compute_all_incl();
 5478
5479 case CC_OP_DECB: return compute_all_decb();
5480 case CC_OP_DECW: return compute_all_decw();
5481 case CC_OP_DECL: return compute_all_decl();
 5482
5483 case CC_OP_SHLB: return compute_all_shlb();
5484 case CC_OP_SHLW: return compute_all_shlw();
5485 case CC_OP_SHLL: return compute_all_shll();
 5486
5487 case CC_OP_SARB: return compute_all_sarb();
5488 case CC_OP_SARW: return compute_all_sarw();
5489 case CC_OP_SARL: return compute_all_sarl();
5490
5491#ifdef TARGET_X86_64
 5492 case CC_OP_MULQ: return compute_all_mulq();
 5493
 5494 case CC_OP_ADDQ: return compute_all_addq();
 5495
 5496 case CC_OP_ADCQ: return compute_all_adcq();
 5497
 5498 case CC_OP_SUBQ: return compute_all_subq();
 5499
 5500 case CC_OP_SBBQ: return compute_all_sbbq();
 5501
 5502 case CC_OP_LOGICQ: return compute_all_logicq();
 5503
 5504 case CC_OP_INCQ: return compute_all_incq();
 5505
 5506 case CC_OP_DECQ: return compute_all_decq();
 5507
 5508 case CC_OP_SHLQ: return compute_all_shlq();
 5509
 5510 case CC_OP_SARQ: return compute_all_sarq();
 5511#endif
5512 }
5513}
5514
5515uint32_t helper_cc_compute_c(int op)
5516{
5517 switch (op) {
5518 default: /* should never happen */ return 0;
5519
5520 case CC_OP_EFLAGS: return compute_c_eflags();
5521
5522 case CC_OP_MULB: return compute_c_mull();
5523 case CC_OP_MULW: return compute_c_mull();
5524 case CC_OP_MULL: return compute_c_mull();
5525
5526 case CC_OP_ADDB: return compute_c_addb();
5527 case CC_OP_ADDW: return compute_c_addw();
5528 case CC_OP_ADDL: return compute_c_addl();
5529
5530 case CC_OP_ADCB: return compute_c_adcb();
5531 case CC_OP_ADCW: return compute_c_adcw();
5532 case CC_OP_ADCL: return compute_c_adcl();
5533
5534 case CC_OP_SUBB: return compute_c_subb();
5535 case CC_OP_SUBW: return compute_c_subw();
5536 case CC_OP_SUBL: return compute_c_subl();
5537
5538 case CC_OP_SBBB: return compute_c_sbbb();
5539 case CC_OP_SBBW: return compute_c_sbbw();
5540 case CC_OP_SBBL: return compute_c_sbbl();
5541
5542 case CC_OP_LOGICB: return compute_c_logicb();
5543 case CC_OP_LOGICW: return compute_c_logicw();
5544 case CC_OP_LOGICL: return compute_c_logicl();
5545
5546 case CC_OP_INCB: return compute_c_incl();
5547 case CC_OP_INCW: return compute_c_incl();
5548 case CC_OP_INCL: return compute_c_incl();
5549
5550 case CC_OP_DECB: return compute_c_incl();
5551 case CC_OP_DECW: return compute_c_incl();
5552 case CC_OP_DECL: return compute_c_incl();
 5553
5554 case CC_OP_SHLB: return compute_c_shlb();
5555 case CC_OP_SHLW: return compute_c_shlw();
5556 case CC_OP_SHLL: return compute_c_shll();
5557
5558 case CC_OP_SARB: return compute_c_sarl();
5559 case CC_OP_SARW: return compute_c_sarl();
5560 case CC_OP_SARL: return compute_c_sarl();
5561
5562#ifdef TARGET_X86_64
5563 case CC_OP_MULQ: return compute_c_mull();
5564
5565 case CC_OP_ADDQ: return compute_c_addq();
5566
5567 case CC_OP_ADCQ: return compute_c_adcq();
5568
5569 case CC_OP_SUBQ: return compute_c_subq();
5570
5571 case CC_OP_SBBQ: return compute_c_sbbq();
5572
5573 case CC_OP_LOGICQ: return compute_c_logicq();
5574
5575 case CC_OP_INCQ: return compute_c_incl();
5576
5577 case CC_OP_DECQ: return compute_c_incl();
5578
5579 case CC_OP_SHLQ: return compute_c_shlq();
5580
5581 case CC_OP_SARQ: return compute_c_sarl();
5582#endif
5583 }
5584}