/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"

//#define DEBUG_PCALL


#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
        log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif


#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

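/* PF is set when the low byte of a result contains an even number of set
   bits; this table precomputes CC_P for every byte value so the flag can
   be looked up rather than recomputed. */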
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
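
/* RCL rotates through CF, so a 16-bit RCL effectively rotates a 17-bit
   quantity and an 8-bit RCL a 9-bit one; these tables reduce the rotate
   count modulo 17 (resp. 9) without a runtime division. */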

static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is this correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the privilege rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
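
/* The three switch sources differ in bookkeeping: JMP and IRET clear the
   busy bit of the outgoing TSS, JMP and CALL set it on the incoming one,
   and only CALL stores a back link and sets NT in the new EFLAGS (see the
   busy-bit and back-link updates in switch_tss() below). */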

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after the accesses have been done */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* from now on, if an exception occurs, it will occur in the next
       task's context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* first load the registers that cannot fault, then reload the ones
       whose load may raise an exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in the 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load just the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
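
/* Worked example: for a one-byte access to port 0x3f9, io_offset selects
   the bitmap word covering ports 0x3f8..0x407; it is shifted right by
   (0x3f9 & 7) = 1 and masked with (1 << 1) - 1, so the single bitmap bit
   for port 0x3f9 must be clear for the access to be allowed. */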

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}
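
/* The stack segment's B bit selects between 32-bit (ESP) and 16-bit (SP)
   stack pointer arithmetic; the returned mask is applied to every stack
   access in the push/pop macros below. */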

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
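
/* On x86_64 the three SET_ESP cases correspond to a 16-bit SP write
   (preserve the upper bits of RSP), a 32-bit ESP write (zero-extend, as
   32-bit register writes do on a 64-bit CPU) and a full 64-bit RSP write. */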

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag; trap gates leave it unchanged */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
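
/* In the 64-bit TSS, RSP0..RSP2 live at offsets 4, 12 and 20 and
   IST1..IST7 start at offset 36, so index = 8 * level + 4 serves both
   callers: level 0..2 selects RSP0..2 and level ist + 3 selects an IST
   entry. */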

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag; trap gates leave it unchanged */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
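/* SYSCALL loads CS from bits 47:32 of the STAR MSR (SS is that selector
   plus 8), fetches the new RIP from LSTAR in 64-bit mode or CSTAR in
   compatibility mode, and clears the RFLAGS bits selected by the SFMASK
   MSR (env->fmask below). */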
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
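/* SYSRET symmetrically takes its selectors from STAR bits 63:48: CS is
   that value (plus 16 when returning to 64-bit code) with RPL 3, and SS
   is that value plus 8. */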
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
#endif

/* real mode interrupt */
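/* In real mode the IDT is the interrupt vector table: an array of 4-byte
   offset:segment pairs at dt->base, with no privilege checking. */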
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than exit the
       emulation with the suitable exception and error code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
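
/* The "contributory" classes above (#DE and vectors 10..13) implement the
   architectural double-fault rules: two contributory faults, or a page
   fault followed by a contributory fault or another page fault, combine
   into #DF, and a fault while #DF is pending is a triple fault (shutdown). */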

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
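
/* Bit 17 (0x20000) of the SMM revision ID advertises SMBASE relocation
   support; helper_rsm() below only reloads SMBASE from the state-save
   area when that bit is set. */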

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */


/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}
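
/* Note: EXCP00_DIVZ is the divide-error vector (#DE, vector 0), which
   x86 raises both for division by zero and for a quotient that overflows
   the destination register; that is why the q > 0xff check above reuses
   the same exception. */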

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: AAM with an immediate divisor of 0 should raise #DE */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = helper_cc_compute_all(CC_OP);
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif
1899
1900void helper_single_step(void)
1901{
1902#ifndef CONFIG_USER_ONLY
1903 check_hw_breakpoints(env, 1);
1904 env->dr[6] |= DR6_BS;
1905#endif
1906 raise_exception(EXCP01_DB);
1907}
1908
1909void helper_cpuid(void)
1910{
1911 uint32_t eax, ebx, ecx, edx;
1912
1913 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1914
1915 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
1916 EAX = eax;
1917 EBX = ebx;
1918 ECX = ecx;
1919 EDX = edx;
1920}
1921
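/* Nested-frame part of ENTER (level > 0): copy the enclosing frames'
   saved frame pointers from the old frame onto the new stack, then
   push t1, the new frame-pointer value supplied by the caller. */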
1922void helper_enter_level(int level, int data32, target_ulong t1)
1923{
1924 target_ulong ssp;
1925 uint32_t esp_mask, esp, ebp;
1926
1927 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1928 ssp = env->segs[R_SS].base;
1929 ebp = EBP;
1930 esp = ESP;
1931 if (data32) {
1932 /* 32 bit */
1933 esp -= 4;
1934 while (--level) {
1935 esp -= 4;
1936 ebp -= 4;
1937 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1938 }
1939 esp -= 4;
1940 stl(ssp + (esp & esp_mask), t1);
1941 } else {
1942 /* 16 bit */
1943 esp -= 2;
1944 while (--level) {
1945 esp -= 2;
1946 ebp -= 2;
1947 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1948 }
1949 esp -= 2;
1950 stw(ssp + (esp & esp_mask), t1);
1951 }
1952}
1953
1954#ifdef TARGET_X86_64
1955void helper_enter64_level(int level, int data64, target_ulong t1)
1956{
1957 target_ulong esp, ebp;
1958 ebp = EBP;
1959 esp = ESP;
1960
1961 if (data64) {
1962 /* 64 bit */
1963 esp -= 8;
1964 while (--level) {
1965 esp -= 8;
1966 ebp -= 8;
1967 stq(esp, ldq(ebp));
1968 }
1969 esp -= 8;
1970 stq(esp, t1);
1971 } else {
1972 /* 16 bit */
1973 esp -= 2;
1974 while (--level) {
1975 esp -= 2;
1976 ebp -= 2;
1977 stw(esp, lduw(ebp));
1978 }
1979 esp -= 2;
1980 stw(esp, t1);
1981 }
1982}
1983#endif
1984
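/* LLDT: the selector must reference the GDT (TI bit clear) and point
   at a present LDT descriptor (system type 2). In long mode system
   descriptors are 16 bytes, hence the larger entry_limit and the extra
   dword merged into the base. */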
1985void helper_lldt(int selector)
1986{
1987 SegmentCache *dt;
1988 uint32_t e1, e2;
1989 int index, entry_limit;
1990 target_ulong ptr;
1991
1992 selector &= 0xffff;
1993 if ((selector & 0xfffc) == 0) {
1994 /* XXX: NULL selector case: invalid LDT */
1995 env->ldt.base = 0;
1996 env->ldt.limit = 0;
1997 } else {
1998 if (selector & 0x4)
1999 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2000 dt = &env->gdt;
2001 index = selector & ~7;
2002#ifdef TARGET_X86_64
2003 if (env->hflags & HF_LMA_MASK)
2004 entry_limit = 15;
2005 else
2006#endif
2007 entry_limit = 7;
2008 if ((index + entry_limit) > dt->limit)
2009 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2010 ptr = dt->base + index;
2011 e1 = ldl_kernel(ptr);
2012 e2 = ldl_kernel(ptr + 4);
2013 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2014 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2015 if (!(e2 & DESC_P_MASK))
2016 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2017#ifdef TARGET_X86_64
2018 if (env->hflags & HF_LMA_MASK) {
2019 uint32_t e3;
2020 e3 = ldl_kernel(ptr + 8);
2021 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2022 env->ldt.base |= (target_ulong)e3 << 32;
2023 } else
2024#endif
2025 {
2026 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2027 }
2028 }
2029 env->ldt.selector = selector;
2030}
2031
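/* LTR: like LLDT, but the descriptor must be an available 286/386 TSS
   (type 1 or 9); it is marked busy in memory once loaded. In long mode
   the upper half of the 16-byte descriptor must have a zero type
   field. */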
2032void helper_ltr(int selector)
2033{
2034 SegmentCache *dt;
2035 uint32_t e1, e2;
2036 int index, type, entry_limit;
2037 target_ulong ptr;
2038
2039 selector &= 0xffff;
2040 if ((selector & 0xfffc) == 0) {
2041 /* NULL selector case: invalid TR */
2042 env->tr.base = 0;
2043 env->tr.limit = 0;
2044 env->tr.flags = 0;
2045 } else {
2046 if (selector & 0x4)
2047 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2048 dt = &env->gdt;
2049 index = selector & ~7;
2050#ifdef TARGET_X86_64
2051 if (env->hflags & HF_LMA_MASK)
2052 entry_limit = 15;
2053 else
2054#endif
2055 entry_limit = 7;
2056 if ((index + entry_limit) > dt->limit)
2057 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2058 ptr = dt->base + index;
2059 e1 = ldl_kernel(ptr);
2060 e2 = ldl_kernel(ptr + 4);
2061 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2062 if ((e2 & DESC_S_MASK) ||
2063 (type != 1 && type != 9))
2064 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2065 if (!(e2 & DESC_P_MASK))
2066 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2067#ifdef TARGET_X86_64
2068 if (env->hflags & HF_LMA_MASK) {
2069 uint32_t e3, e4;
2070 e3 = ldl_kernel(ptr + 8);
2071 e4 = ldl_kernel(ptr + 12);
2072 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2073 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2074 load_seg_cache_raw_dt(&env->tr, e1, e2);
2075 env->tr.base |= (target_ulong)e3 << 32;
2076 } else
2077#endif
2078 {
2079 load_seg_cache_raw_dt(&env->tr, e1, e2);
2080 }
2081 e2 |= DESC_TSS_BUSY_MASK;
2082 stl_kernel(ptr + 4, e2);
2083 }
2084 env->tr.selector = selector;
2085}
2086
2087/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2088void helper_load_seg(int seg_reg, int selector)
2089{
2090 uint32_t e1, e2;
2091 int cpl, dpl, rpl;
2092 SegmentCache *dt;
2093 int index;
2094 target_ulong ptr;
2095
2096 selector &= 0xffff;
2097 cpl = env->hflags & HF_CPL_MASK;
2098 if ((selector & 0xfffc) == 0) {
2099 /* null selector case */
2100 if (seg_reg == R_SS
2101#ifdef TARGET_X86_64
2102 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2103#endif
2104 )
2105 raise_exception_err(EXCP0D_GPF, 0);
2106 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2107 } else {
2108
2109 if (selector & 0x4)
2110 dt = &env->ldt;
2111 else
2112 dt = &env->gdt;
2113 index = selector & ~7;
2114 if ((index + 7) > dt->limit)
2115 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2116 ptr = dt->base + index;
2117 e1 = ldl_kernel(ptr);
2118 e2 = ldl_kernel(ptr + 4);
2119
2120 if (!(e2 & DESC_S_MASK))
2121 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2122 rpl = selector & 3;
2123 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2124 if (seg_reg == R_SS) {
2125 /* must be writable segment */
2126 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2127 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2128 if (rpl != cpl || dpl != cpl)
2129 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2130 } else {
2131 /* must be readable segment */
2132 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2133 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2134
2135 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2136 /* if not conforming code, test rights */
2137 if (dpl < cpl || dpl < rpl)
2138 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2139 }
2140 }
2141
2142 if (!(e2 & DESC_P_MASK)) {
2143 if (seg_reg == R_SS)
2144 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2145 else
2146 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2147 }
2148
2149 /* set the access bit if not already set */
2150 if (!(e2 & DESC_A_MASK)) {
2151 e2 |= DESC_A_MASK;
2152 stl_kernel(ptr + 4, e2);
2153 }
2154
2155 cpu_x86_load_seg_cache(env, seg_reg, selector,
2156 get_seg_base(e1, e2),
2157 get_seg_limit(e1, e2),
2158 e2);
2159#if 0
2160 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2161 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2162#endif
2163 }
2164}
2165
2166/* protected mode jump */
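/* Direct jumps check conforming (DPL <= CPL) versus non-conforming
   (RPL <= CPL and DPL == CPL) code segments; TSS and task-gate targets
   go through switch_tss(), and call gates redirect to the gate's
   selector:offset without changing CPL. */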
2167void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2168 int next_eip_addend)
2169{
2170 int gate_cs, type;
2171 uint32_t e1, e2, cpl, dpl, rpl, limit;
2172 target_ulong next_eip;
2173
2174 if ((new_cs & 0xfffc) == 0)
2175 raise_exception_err(EXCP0D_GPF, 0);
2176 if (load_segment(&e1, &e2, new_cs) != 0)
2177 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2178 cpl = env->hflags & HF_CPL_MASK;
2179 if (e2 & DESC_S_MASK) {
2180 if (!(e2 & DESC_CS_MASK))
2181 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2182 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2183 if (e2 & DESC_C_MASK) {
2184 /* conforming code segment */
2185 if (dpl > cpl)
2186 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2187 } else {
2188 /* non conforming code segment */
2189 rpl = new_cs & 3;
2190 if (rpl > cpl)
2191 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2192 if (dpl != cpl)
2193 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2194 }
2195 if (!(e2 & DESC_P_MASK))
2196 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2197 limit = get_seg_limit(e1, e2);
2198 if (new_eip > limit &&
2199 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2200 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2201 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2202 get_seg_base(e1, e2), limit, e2);
2203 EIP = new_eip;
2204 } else {
2205 /* jump to call or task gate */
2206 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2207 rpl = new_cs & 3;
2208 cpl = env->hflags & HF_CPL_MASK;
2209 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2210 switch(type) {
2211 case 1: /* 286 TSS */
2212 case 9: /* 386 TSS */
2213 case 5: /* task gate */
2214 if (dpl < cpl || dpl < rpl)
2215 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2216 next_eip = env->eip + next_eip_addend;
2217 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2218 CC_OP = CC_OP_EFLAGS;
2219 break;
2220 case 4: /* 286 call gate */
2221 case 12: /* 386 call gate */
2222 if ((dpl < cpl) || (dpl < rpl))
2223 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2224 if (!(e2 & DESC_P_MASK))
2225 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2226 gate_cs = e1 >> 16;
2227 new_eip = (e1 & 0xffff);
2228 if (type == 12)
2229 new_eip |= (e2 & 0xffff0000);
2230 if (load_segment(&e1, &e2, gate_cs) != 0)
2231 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2232 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2233 /* must be code segment */
2234 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2235 (DESC_S_MASK | DESC_CS_MASK)))
2236 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2237 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2238 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2239 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2240 if (!(e2 & DESC_P_MASK))
2241 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2242 limit = get_seg_limit(e1, e2);
2243 if (new_eip > limit)
2244 raise_exception_err(EXCP0D_GPF, 0);
2245 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2246 get_seg_base(e1, e2), limit, e2);
2247 EIP = new_eip;
2248 break;
2249 default:
2250 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2251 break;
2252 }
2253 }
2254}
2255
2256/* real mode call */
2257void helper_lcall_real(int new_cs, target_ulong new_eip1,
2258 int shift, int next_eip)
2259{
2260 int new_eip;
2261 uint32_t esp, esp_mask;
2262 target_ulong ssp;
2263
2264 new_eip = new_eip1;
2265 esp = ESP;
2266 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2267 ssp = env->segs[R_SS].base;
2268 if (shift) {
2269 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2270 PUSHL(ssp, esp, esp_mask, next_eip);
2271 } else {
2272 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2273 PUSHW(ssp, esp, esp_mask, next_eip);
2274 }
2275
2276 SET_ESP(esp, esp_mask);
2277 env->eip = new_eip;
2278 env->segs[R_CS].selector = new_cs;
2279 env->segs[R_CS].base = (new_cs << 4);
2280}
2281
2282/* protected mode call */
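/* Far calls share the segment checks of ljmp, but a call gate to a
   more privileged non-conforming segment also switches stacks: the new
   SS:ESP is fetched from the TSS and param_count entries are copied
   from the caller's stack before CS:EIP is pushed. */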
2283void helper_lcall_protected(int new_cs, target_ulong new_eip,
2284 int shift, int next_eip_addend)
2285{
2286 int new_stack, i;
2287 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2288 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2289 uint32_t val, limit, old_sp_mask;
2290 target_ulong ssp, old_ssp, next_eip;
2291
2292 next_eip = env->eip + next_eip_addend;
2293 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2294 LOG_PCALL_STATE(env);
2295 if ((new_cs & 0xfffc) == 0)
2296 raise_exception_err(EXCP0D_GPF, 0);
2297 if (load_segment(&e1, &e2, new_cs) != 0)
2298 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2299 cpl = env->hflags & HF_CPL_MASK;
2300 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2301 if (e2 & DESC_S_MASK) {
2302 if (!(e2 & DESC_CS_MASK))
2303 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2304 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2305 if (e2 & DESC_C_MASK) {
2306 /* conforming code segment */
2307 if (dpl > cpl)
2308 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2309 } else {
2310 /* non conforming code segment */
2311 rpl = new_cs & 3;
2312 if (rpl > cpl)
2313 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2314 if (dpl != cpl)
2315 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2316 }
2317 if (!(e2 & DESC_P_MASK))
2318 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2319
2320#ifdef TARGET_X86_64
2321 /* XXX: check 16/32 bit cases in long mode */
2322 if (shift == 2) {
2323 target_ulong rsp;
2324 /* 64 bit case */
2325 rsp = ESP;
2326 PUSHQ(rsp, env->segs[R_CS].selector);
2327 PUSHQ(rsp, next_eip);
2328 /* from this point, not restartable */
2329 ESP = rsp;
2330 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2331 get_seg_base(e1, e2),
2332 get_seg_limit(e1, e2), e2);
2333 EIP = new_eip;
2334 } else
2335#endif
2336 {
2337 sp = ESP;
2338 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2339 ssp = env->segs[R_SS].base;
2340 if (shift) {
2341 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2342 PUSHL(ssp, sp, sp_mask, next_eip);
2343 } else {
2344 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2345 PUSHW(ssp, sp, sp_mask, next_eip);
2346 }
2347
2348 limit = get_seg_limit(e1, e2);
2349 if (new_eip > limit)
2350 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2351 /* from this point, not restartable */
2352 SET_ESP(sp, sp_mask);
2353 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2354 get_seg_base(e1, e2), limit, e2);
2355 EIP = new_eip;
2356 }
2357 } else {
2358 /* check gate type */
2359 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2360 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2361 rpl = new_cs & 3;
2362 switch(type) {
2363 case 1: /* available 286 TSS */
2364 case 9: /* available 386 TSS */
2365 case 5: /* task gate */
2366 if (dpl < cpl || dpl < rpl)
2367 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2368 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2369 CC_OP = CC_OP_EFLAGS;
2370 return;
2371 case 4: /* 286 call gate */
2372 case 12: /* 386 call gate */
2373 break;
2374 default:
2375 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2376 break;
2377 }
2378 shift = type >> 3;
2379
2380 if (dpl < cpl || dpl < rpl)
2381 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2382 /* check valid bit */
2383 if (!(e2 & DESC_P_MASK))
2384 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2385 selector = e1 >> 16;
2386 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2387 param_count = e2 & 0x1f;
2388 if ((selector & 0xfffc) == 0)
2389 raise_exception_err(EXCP0D_GPF, 0);
2390
2391 if (load_segment(&e1, &e2, selector) != 0)
2392 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2393 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2394 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2395 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2396 if (dpl > cpl)
2397 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2398 if (!(e2 & DESC_P_MASK))
2399 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2400
2401 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2402 /* to inner privilege */
2403 get_ss_esp_from_tss(&ss, &sp, dpl);
2404 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2405 ss, sp, param_count, ESP);
2406 if ((ss & 0xfffc) == 0)
2407 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2408 if ((ss & 3) != dpl)
2409 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2410 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2411 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2412 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2413 if (ss_dpl != dpl)
2414 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2415 if (!(ss_e2 & DESC_S_MASK) ||
2416 (ss_e2 & DESC_CS_MASK) ||
2417 !(ss_e2 & DESC_W_MASK))
2418 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2419 if (!(ss_e2 & DESC_P_MASK))
2420 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2421
2422 // push_size = ((param_count * 2) + 8) << shift;
2423
2424 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2425 old_ssp = env->segs[R_SS].base;
2426
2427 sp_mask = get_sp_mask(ss_e2);
2428 ssp = get_seg_base(ss_e1, ss_e2);
2429 if (shift) {
2430 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2431 PUSHL(ssp, sp, sp_mask, ESP);
2432 for(i = param_count - 1; i >= 0; i--) {
2433 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2434 PUSHL(ssp, sp, sp_mask, val);
2435 }
2436 } else {
2437 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2438 PUSHW(ssp, sp, sp_mask, ESP);
2439 for(i = param_count - 1; i >= 0; i--) {
2440 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2441 PUSHW(ssp, sp, sp_mask, val);
2442 }
2443 }
2444 new_stack = 1;
2445 } else {
2446 /* to same privilege */
2447 sp = ESP;
2448 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2449 ssp = env->segs[R_SS].base;
2450 // push_size = (4 << shift);
2451 new_stack = 0;
2452 }
2453
2454 if (shift) {
2455 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2456 PUSHL(ssp, sp, sp_mask, next_eip);
2457 } else {
2458 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2459 PUSHW(ssp, sp, sp_mask, next_eip);
2460 }
2461
2462 /* from this point, not restartable */
2463
2464 if (new_stack) {
2465 ss = (ss & ~3) | dpl;
2466 cpu_x86_load_seg_cache(env, R_SS, ss,
2467 ssp,
2468 get_seg_limit(ss_e1, ss_e2),
2469 ss_e2);
2470 }
2471
2472 selector = (selector & ~3) | dpl;
2473 cpu_x86_load_seg_cache(env, R_CS, selector,
2474 get_seg_base(e1, e2),
2475 get_seg_limit(e1, e2),
2476 e2);
2477 cpu_x86_set_cpl(env, dpl);
2478 SET_ESP(sp, sp_mask);
2479 EIP = offset;
2480 }
2481#ifdef USE_KQEMU
2482 if (kqemu_is_ok(env)) {
2483 env->exception_index = -1;
2484 cpu_loop_exit();
2485 }
2486#endif
2487}
2488
2489/* real and vm86 mode iret */
2490void helper_iret_real(int shift)
2491{
2492 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2493 target_ulong ssp;
2494 int eflags_mask;
2495
2496 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2497 sp = ESP;
2498 ssp = env->segs[R_SS].base;
2499 if (shift == 1) {
2500 /* 32 bits */
2501 POPL(ssp, sp, sp_mask, new_eip);
2502 POPL(ssp, sp, sp_mask, new_cs);
2503 new_cs &= 0xffff;
2504 POPL(ssp, sp, sp_mask, new_eflags);
2505 } else {
2506 /* 16 bits */
2507 POPW(ssp, sp, sp_mask, new_eip);
2508 POPW(ssp, sp, sp_mask, new_cs);
2509 POPW(ssp, sp, sp_mask, new_eflags);
2510 }
2511 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2512 env->segs[R_CS].selector = new_cs;
2513 env->segs[R_CS].base = (new_cs << 4);
2514 env->eip = new_eip;
2515 if (env->eflags & VM_MASK)
2516 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2517 else
2518 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2519 if (shift == 0)
2520 eflags_mask &= 0xffff;
2521 load_eflags(new_eflags, eflags_mask);
2522 env->hflags2 &= ~HF2_NMI_MASK;
2523}
2524
2525static inline void validate_seg(int seg_reg, int cpl)
2526{
2527 int dpl;
2528 uint32_t e2;
2529
2530 /* XXX: on x86_64, we do not want to nullify FS and GS because
2531 they may still contain a valid base. I would be interested to
2532 know how a real x86_64 CPU behaves */
2533 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2534 (env->segs[seg_reg].selector & 0xfffc) == 0)
2535 return;
2536
2537 e2 = env->segs[seg_reg].flags;
2538 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2539 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2540 /* data or non conforming code segment */
2541 if (dpl < cpl) {
2542 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2543 }
2544 }
2545}
2546
2547/* protected mode iret */
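/* Shared by lret and iret (is_iret selects the extra EFLAGS pop);
   addend skips the immediate operand of "lret n". A return to an outer
   privilege level also pops SS:ESP, nulls data segments whose DPL is
   below the new CPL, and, for iret, reloads EFLAGS under a mask that
   depends on the old CPL and IOPL. */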
2548static inline void helper_ret_protected(int shift, int is_iret, int addend)
2549{
2550 uint32_t new_cs, new_eflags, new_ss;
2551 uint32_t new_es, new_ds, new_fs, new_gs;
2552 uint32_t e1, e2, ss_e1, ss_e2;
2553 int cpl, dpl, rpl, eflags_mask, iopl;
2554 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2555
2556#ifdef TARGET_X86_64
2557 if (shift == 2)
2558 sp_mask = -1;
2559 else
2560#endif
2561 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2562 sp = ESP;
2563 ssp = env->segs[R_SS].base;
2564 new_eflags = 0; /* avoid warning */
2565#ifdef TARGET_X86_64
2566 if (shift == 2) {
2567 POPQ(sp, new_eip);
2568 POPQ(sp, new_cs);
2569 new_cs &= 0xffff;
2570 if (is_iret) {
2571 POPQ(sp, new_eflags);
2572 }
2573 } else
2574#endif
2575 if (shift == 1) {
2576 /* 32 bits */
2577 POPL(ssp, sp, sp_mask, new_eip);
2578 POPL(ssp, sp, sp_mask, new_cs);
2579 new_cs &= 0xffff;
2580 if (is_iret) {
2581 POPL(ssp, sp, sp_mask, new_eflags);
2582 if (new_eflags & VM_MASK)
2583 goto return_to_vm86;
2584 }
2585 } else {
2586 /* 16 bits */
2587 POPW(ssp, sp, sp_mask, new_eip);
2588 POPW(ssp, sp, sp_mask, new_cs);
2589 if (is_iret)
2590 POPW(ssp, sp, sp_mask, new_eflags);
2591 }
2592 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2593 new_cs, new_eip, shift, addend);
2594 LOG_PCALL_STATE(env);
2595 if ((new_cs & 0xfffc) == 0)
2596 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2597 if (load_segment(&e1, &e2, new_cs) != 0)
2598 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2599 if (!(e2 & DESC_S_MASK) ||
2600 !(e2 & DESC_CS_MASK))
2601 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2602 cpl = env->hflags & HF_CPL_MASK;
2603 rpl = new_cs & 3;
2604 if (rpl < cpl)
2605 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2606 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2607 if (e2 & DESC_C_MASK) {
2608 if (dpl > rpl)
2609 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2610 } else {
2611 if (dpl != rpl)
2612 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2613 }
2614 if (!(e2 & DESC_P_MASK))
2615 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2616
2617 sp += addend;
2618 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2619 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2620 /* return to same privilege level */
2621 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2622 get_seg_base(e1, e2),
2623 get_seg_limit(e1, e2),
2624 e2);
2625 } else {
2626 /* return to different privilege level */
2627#ifdef TARGET_X86_64
2628 if (shift == 2) {
2629 POPQ(sp, new_esp);
2630 POPQ(sp, new_ss);
2631 new_ss &= 0xffff;
2632 } else
2633#endif
2634 if (shift == 1) {
2635 /* 32 bits */
2636 POPL(ssp, sp, sp_mask, new_esp);
2637 POPL(ssp, sp, sp_mask, new_ss);
2638 new_ss &= 0xffff;
2639 } else {
2640 /* 16 bits */
2641 POPW(ssp, sp, sp_mask, new_esp);
2642 POPW(ssp, sp, sp_mask, new_ss);
2643 }
2644 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2645 new_ss, new_esp);
2646 if ((new_ss & 0xfffc) == 0) {
2647#ifdef TARGET_X86_64
2648 /* NULL ss is allowed in long mode if cpl != 3 */
2649 /* XXX: test CS64 ? */
2650 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2651 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2652 0, 0xffffffff,
2653 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2654 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2655 DESC_W_MASK | DESC_A_MASK);
2656 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2657 } else
2658#endif
2659 {
2660 raise_exception_err(EXCP0D_GPF, 0);
2661 }
2662 } else {
2663 if ((new_ss & 3) != rpl)
2664 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2665 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2666 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2667 if (!(ss_e2 & DESC_S_MASK) ||
2668 (ss_e2 & DESC_CS_MASK) ||
2669 !(ss_e2 & DESC_W_MASK))
2670 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2671 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2672 if (dpl != rpl)
2673 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2674 if (!(ss_e2 & DESC_P_MASK))
2675 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2676 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2677 get_seg_base(ss_e1, ss_e2),
2678 get_seg_limit(ss_e1, ss_e2),
2679 ss_e2);
2680 }
2681
2682 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2683 get_seg_base(e1, e2),
2684 get_seg_limit(e1, e2),
2685 e2);
2686 cpu_x86_set_cpl(env, rpl);
2687 sp = new_esp;
2688#ifdef TARGET_X86_64
2689 if (env->hflags & HF_CS64_MASK)
2690 sp_mask = -1;
2691 else
2692#endif
2693 sp_mask = get_sp_mask(ss_e2);
2694
2695 /* validate data segments */
2696 validate_seg(R_ES, rpl);
2697 validate_seg(R_DS, rpl);
2698 validate_seg(R_FS, rpl);
2699 validate_seg(R_GS, rpl);
2700
2701 sp += addend;
2702 }
2703 SET_ESP(sp, sp_mask);
2704 env->eip = new_eip;
2705 if (is_iret) {
2706 /* NOTE: 'cpl' is the _old_ CPL */
2707 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2708 if (cpl == 0)
2709 eflags_mask |= IOPL_MASK;
2710 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2711 if (cpl <= iopl)
2712 eflags_mask |= IF_MASK;
2713 if (shift == 0)
2714 eflags_mask &= 0xffff;
2715 load_eflags(new_eflags, eflags_mask);
2716 }
2717 return;
2718
2719 return_to_vm86:
2720 POPL(ssp, sp, sp_mask, new_esp);
2721 POPL(ssp, sp, sp_mask, new_ss);
2722 POPL(ssp, sp, sp_mask, new_es);
2723 POPL(ssp, sp, sp_mask, new_ds);
2724 POPL(ssp, sp, sp_mask, new_fs);
2725 POPL(ssp, sp, sp_mask, new_gs);
2726
2727 /* modify processor state */
2728 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2729 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2730 load_seg_vm(R_CS, new_cs & 0xffff);
2731 cpu_x86_set_cpl(env, 3);
2732 load_seg_vm(R_SS, new_ss & 0xffff);
2733 load_seg_vm(R_ES, new_es & 0xffff);
2734 load_seg_vm(R_DS, new_ds & 0xffff);
2735 load_seg_vm(R_FS, new_fs & 0xffff);
2736 load_seg_vm(R_GS, new_gs & 0xffff);
2737
2738 env->eip = new_eip & 0xffff;
2739 ESP = new_esp;
2740}
2741
2742void helper_iret_protected(int shift, int next_eip)
2743{
2744 int tss_selector, type;
2745 uint32_t e1, e2;
2746
2747 /* specific case for TSS */
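    /* NT set: this iret is a task return; the selector of the previous
       task comes from the back link field at offset 0 of the current
       TSS. */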
2748 if (env->eflags & NT_MASK) {
2749#ifdef TARGET_X86_64
2750 if (env->hflags & HF_LMA_MASK)
2751 raise_exception_err(EXCP0D_GPF, 0);
2752#endif
2753 tss_selector = lduw_kernel(env->tr.base + 0);
2754 if (tss_selector & 4)
2755 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2756 if (load_segment(&e1, &e2, tss_selector) != 0)
2757 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2758 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2759 /* NOTE: we check both segment and busy TSS */
2760 if (type != 3)
2761 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2762 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2763 } else {
2764 helper_ret_protected(shift, 1, 0);
2765 }
2766 env->hflags2 &= ~HF2_NMI_MASK;
2767#ifdef USE_KQEMU
2768 if (kqemu_is_ok(env)) {
2769 CC_OP = CC_OP_EFLAGS;
2770 env->exception_index = -1;
2771 cpu_loop_exit();
2772 }
2773#endif
2774}
2775
2776void helper_lret_protected(int shift, int addend)
2777{
2778 helper_ret_protected(shift, 0, addend);
2779#ifdef USE_KQEMU
2780 if (kqemu_is_ok(env)) {
2781 env->exception_index = -1;
2782 cpu_loop_exit();
2783 }
2784#endif
2785}
2786
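/* SYSENTER/SYSEXIT use flat segments derived from the SYSENTER_CS MSR:
   CS = cs and SS = cs + 8 on entry at CPL 0; SYSEXIT returns to CPL 3
   with cs + 16 / cs + 24 (cs + 32 / cs + 40 for a 64-bit exit) and
   EIP = EDX, ESP = ECX. */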
2787void helper_sysenter(void)
2788{
2789 if (env->sysenter_cs == 0) {
2790 raise_exception_err(EXCP0D_GPF, 0);
2791 }
2792 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2793 cpu_x86_set_cpl(env, 0);
2794
2795#ifdef TARGET_X86_64
2796 if (env->hflags & HF_LMA_MASK) {
2797 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2798 0, 0xffffffff,
2799 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2800 DESC_S_MASK |
2801 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2802 } else
2803#endif
2804 {
2805 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2806 0, 0xffffffff,
2807 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2808 DESC_S_MASK |
2809 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2810 }
2811 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2812 0, 0xffffffff,
2813 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2814 DESC_S_MASK |
2815 DESC_W_MASK | DESC_A_MASK);
2816 ESP = env->sysenter_esp;
2817 EIP = env->sysenter_eip;
2818}
2819
2820void helper_sysexit(int dflag)
2821{
2822 int cpl;
2823
2824 cpl = env->hflags & HF_CPL_MASK;
2825 if (env->sysenter_cs == 0 || cpl != 0) {
2826 raise_exception_err(EXCP0D_GPF, 0);
2827 }
2828 cpu_x86_set_cpl(env, 3);
2829#ifdef TARGET_X86_64
2830 if (dflag == 2) {
2831 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2832 0, 0xffffffff,
2833 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2834 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2835 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2836 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2837 0, 0xffffffff,
2838 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2839 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2840 DESC_W_MASK | DESC_A_MASK);
2841 } else
2842#endif
2843 {
2844 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2845 0, 0xffffffff,
2846 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2847 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2848 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2849 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2850 0, 0xffffffff,
2851 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2852 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2853 DESC_W_MASK | DESC_A_MASK);
2854 }
2855 ESP = ECX;
2856 EIP = EDX;
2857#ifdef USE_KQEMU
2858 if (kqemu_is_ok(env)) {
2859 env->exception_index = -1;
2860 cpu_loop_exit();
2861 }
2862#endif
2863}
2864
2865#if defined(CONFIG_USER_ONLY)
2866target_ulong helper_read_crN(int reg)
2867{
2868 return 0;
2869}
2870
2871void helper_write_crN(int reg, target_ulong t0)
2872{
2873}
2874
2875void helper_movl_drN_T0(int reg, target_ulong t0)
2876{
2877}
2878#else
2879target_ulong helper_read_crN(int reg)
2880{
2881 target_ulong val;
2882
2883 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2884 switch(reg) {
2885 default:
2886 val = env->cr[reg];
2887 break;
2888 case 8:
2889 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2890 val = cpu_get_apic_tpr(env);
2891 } else {
2892 val = env->v_tpr;
2893 }
2894 break;
2895 }
2896 return val;
2897}
2898
2899void helper_write_crN(int reg, target_ulong t0)
2900{
2901 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2902 switch(reg) {
2903 case 0:
2904 cpu_x86_update_cr0(env, t0);
2905 break;
2906 case 3:
2907 cpu_x86_update_cr3(env, t0);
2908 break;
2909 case 4:
2910 cpu_x86_update_cr4(env, t0);
2911 break;
2912 case 8:
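        /* CR8 is the task priority register: it is forwarded to the
           APIC TPR unless SVM virtual interrupt masking is active; the
           low four bits are always mirrored in v_tpr. */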
2913 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2914 cpu_set_apic_tpr(env, t0);
2915 }
2916 env->v_tpr = t0 & 0x0f;
2917 break;
2918 default:
2919 env->cr[reg] = t0;
2920 break;
2921 }
2922}
2923
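/* Debug register writes: DR0-DR3 re-insert the corresponding hardware
   breakpoint, a DR7 write re-evaluates all four; other registers are
   stored as-is. */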
2924void helper_movl_drN_T0(int reg, target_ulong t0)
2925{
2926 int i;
2927
2928 if (reg < 4) {
2929 hw_breakpoint_remove(env, reg);
2930 env->dr[reg] = t0;
2931 hw_breakpoint_insert(env, reg);
2932 } else if (reg == 7) {
2933 for (i = 0; i < 4; i++)
2934 hw_breakpoint_remove(env, i);
2935 env->dr[7] = t0;
2936 for (i = 0; i < 4; i++)
2937 hw_breakpoint_insert(env, i);
2938 } else
2939 env->dr[reg] = t0;
2940}
2941#endif
2942
2943void helper_lmsw(target_ulong t0)
2944{
2945 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2946 if already set to one. */
2947 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2948 helper_write_crN(0, t0);
2949}
2950
2951void helper_clts(void)
2952{
2953 env->cr[0] &= ~CR0_TS_MASK;
2954 env->hflags &= ~HF_TS_MASK;
2955}
2956
2957void helper_invlpg(target_ulong addr)
2958{
2959 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
2960 tlb_flush_page(env, addr);
2961}
2962
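/* RDTSC raises #GP outside CPL 0 when CR4.TSD is set; the returned
   value includes env->tsc_offset, which is nonzero when running a
   guest under SVM. */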
2963void helper_rdtsc(void)
2964{
2965 uint64_t val;
2966
2967 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2968 raise_exception(EXCP0D_GPF);
2969 }
2970 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
2971
2972 val = cpu_get_tsc(env) + env->tsc_offset;
2973 EAX = (uint32_t)(val);
2974 EDX = (uint32_t)(val >> 32);
2975}
2976
2977void helper_rdpmc(void)
2978{
2979 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2980 raise_exception(EXCP0D_GPF);
2981 }
2982 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
2983
2984 /* currently unimplemented */
2985 raise_exception_err(EXCP06_ILLOP, 0);
2986}
2987
2988#if defined(CONFIG_USER_ONLY)
2989void helper_wrmsr(void)
2990{
2991}
2992
2993void helper_rdmsr(void)
2994{
2995}
2996#else
2997void helper_wrmsr(void)
2998{
2999 uint64_t val;
3000
3001 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3002
3003 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3004
3005 switch((uint32_t)ECX) {
3006 case MSR_IA32_SYSENTER_CS:
3007 env->sysenter_cs = val & 0xffff;
3008 break;
3009 case MSR_IA32_SYSENTER_ESP:
3010 env->sysenter_esp = val;
3011 break;
3012 case MSR_IA32_SYSENTER_EIP:
3013 env->sysenter_eip = val;
3014 break;
3015 case MSR_IA32_APICBASE:
3016 cpu_set_apic_base(env, val);
3017 break;
3018 case MSR_EFER:
3019 {
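            /* Only EFER bits whose backing feature is advertised by
               CPUID may change; all other bits keep their current
               value. */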
3020 uint64_t update_mask;
3021 update_mask = 0;
3022 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3023 update_mask |= MSR_EFER_SCE;
3024 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3025 update_mask |= MSR_EFER_LME;
3026 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3027 update_mask |= MSR_EFER_FFXSR;
3028 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3029 update_mask |= MSR_EFER_NXE;
3030 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3031 update_mask |= MSR_EFER_SVME;
3032 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3033 update_mask |= MSR_EFER_FFXSR;
3034 cpu_load_efer(env, (env->efer & ~update_mask) |
3035 (val & update_mask));
3036 }
3037 break;
3038 case MSR_STAR:
3039 env->star = val;
3040 break;
3041 case MSR_PAT:
3042 env->pat = val;
3043 break;
3044 case MSR_VM_HSAVE_PA:
3045 env->vm_hsave = val;
3046 break;
3047#ifdef TARGET_X86_64
3048 case MSR_LSTAR:
3049 env->lstar = val;
3050 break;
3051 case MSR_CSTAR:
3052 env->cstar = val;
3053 break;
3054 case MSR_FMASK:
3055 env->fmask = val;
3056 break;
3057 case MSR_FSBASE:
3058 env->segs[R_FS].base = val;
3059 break;
3060 case MSR_GSBASE:
3061 env->segs[R_GS].base = val;
3062 break;
3063 case MSR_KERNELGSBASE:
3064 env->kernelgsbase = val;
3065 break;
3066#endif
3067 case MSR_MTRRphysBase(0):
3068 case MSR_MTRRphysBase(1):
3069 case MSR_MTRRphysBase(2):
3070 case MSR_MTRRphysBase(3):
3071 case MSR_MTRRphysBase(4):
3072 case MSR_MTRRphysBase(5):
3073 case MSR_MTRRphysBase(6):
3074 case MSR_MTRRphysBase(7):
3075 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3076 break;
3077 case MSR_MTRRphysMask(0):
3078 case MSR_MTRRphysMask(1):
3079 case MSR_MTRRphysMask(2):
3080 case MSR_MTRRphysMask(3):
3081 case MSR_MTRRphysMask(4):
3082 case MSR_MTRRphysMask(5):
3083 case MSR_MTRRphysMask(6):
3084 case MSR_MTRRphysMask(7):
3085 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3086 break;
3087 case MSR_MTRRfix64K_00000:
3088 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3089 break;
3090 case MSR_MTRRfix16K_80000:
3091 case MSR_MTRRfix16K_A0000:
3092 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3093 break;
3094 case MSR_MTRRfix4K_C0000:
3095 case MSR_MTRRfix4K_C8000:
3096 case MSR_MTRRfix4K_D0000:
3097 case MSR_MTRRfix4K_D8000:
3098 case MSR_MTRRfix4K_E0000:
3099 case MSR_MTRRfix4K_E8000:
3100 case MSR_MTRRfix4K_F0000:
3101 case MSR_MTRRfix4K_F8000:
3102 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3103 break;
3104 case MSR_MTRRdefType:
3105 env->mtrr_deftype = val;
3106 break;
3107 default:
3108 /* XXX: exception ? */
3109 break;
3110 }
3111}
3112
3113void helper_rdmsr(void)
3114{
3115 uint64_t val;
3116
3117 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3118
3119 switch((uint32_t)ECX) {
3120 case MSR_IA32_SYSENTER_CS:
3121 val = env->sysenter_cs;
3122 break;
3123 case MSR_IA32_SYSENTER_ESP:
3124 val = env->sysenter_esp;
3125 break;
3126 case MSR_IA32_SYSENTER_EIP:
3127 val = env->sysenter_eip;
3128 break;
3129 case MSR_IA32_APICBASE:
3130 val = cpu_get_apic_base(env);
3131 break;
3132 case MSR_EFER:
3133 val = env->efer;
3134 break;
3135 case MSR_STAR:
3136 val = env->star;
3137 break;
3138 case MSR_PAT:
3139 val = env->pat;
3140 break;
3141 case MSR_VM_HSAVE_PA:
3142 val = env->vm_hsave;
3143 break;
3144 case MSR_IA32_PERF_STATUS:
3145 /* tsc_increment_by_tick */
3146 val = 1000ULL;
3147 /* CPU multiplier */
3148 val |= (((uint64_t)4ULL) << 40);
3149 break;
3150#ifdef TARGET_X86_64
3151 case MSR_LSTAR:
3152 val = env->lstar;
3153 break;
3154 case MSR_CSTAR:
3155 val = env->cstar;
3156 break;
3157 case MSR_FMASK:
3158 val = env->fmask;
3159 break;
3160 case MSR_FSBASE:
3161 val = env->segs[R_FS].base;
3162 break;
3163 case MSR_GSBASE:
3164 val = env->segs[R_GS].base;
3165 break;
3166 case MSR_KERNELGSBASE:
3167 val = env->kernelgsbase;
3168 break;
3169#endif
3170#ifdef USE_KQEMU
3171 case MSR_QPI_COMMBASE:
3172 if (env->kqemu_enabled) {
3173 val = kqemu_comm_base;
3174 } else {
3175 val = 0;
3176 }
3177 break;
3178#endif
3179 case MSR_MTRRphysBase(0):
3180 case MSR_MTRRphysBase(1):
3181 case MSR_MTRRphysBase(2):
3182 case MSR_MTRRphysBase(3):
3183 case MSR_MTRRphysBase(4):
3184 case MSR_MTRRphysBase(5):
3185 case MSR_MTRRphysBase(6):
3186 case MSR_MTRRphysBase(7):
3187 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3188 break;
3189 case MSR_MTRRphysMask(0):
3190 case MSR_MTRRphysMask(1):
3191 case MSR_MTRRphysMask(2):
3192 case MSR_MTRRphysMask(3):
3193 case MSR_MTRRphysMask(4):
3194 case MSR_MTRRphysMask(5):
3195 case MSR_MTRRphysMask(6):
3196 case MSR_MTRRphysMask(7):
3197 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3198 break;
3199 case MSR_MTRRfix64K_00000:
3200 val = env->mtrr_fixed[0];
3201 break;
3202 case MSR_MTRRfix16K_80000:
3203 case MSR_MTRRfix16K_A0000:
3204 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3205 break;
3206 case MSR_MTRRfix4K_C0000:
3207 case MSR_MTRRfix4K_C8000:
3208 case MSR_MTRRfix4K_D0000:
3209 case MSR_MTRRfix4K_D8000:
3210 case MSR_MTRRfix4K_E0000:
3211 case MSR_MTRRfix4K_E8000:
3212 case MSR_MTRRfix4K_F0000:
3213 case MSR_MTRRfix4K_F8000:
3214 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3215 break;
3216 case MSR_MTRRdefType:
3217 val = env->mtrr_deftype;
3218 break;
3219 case MSR_MTRRcap:
3220 if (env->cpuid_features & CPUID_MTRR)
3221 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3222 else
3223 /* XXX: exception ? */
3224 val = 0;
3225 break;
3226 default:
3227 /* XXX: exception ? */
3228 val = 0;
3229 break;
3230 }
3231 EAX = (uint32_t)(val);
3232 EDX = (uint32_t)(val >> 32);
3233}
3234#endif
3235
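/* LSL/LAR/VERR/VERW below never fault: on any descriptor or privilege
   check failure they clear ZF via CC_SRC and bail out, setting ZF only
   on success. */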
3236target_ulong helper_lsl(target_ulong selector1)
3237{
3238 unsigned int limit;
3239 uint32_t e1, e2, eflags, selector;
3240 int rpl, dpl, cpl, type;
3241
3242 selector = selector1 & 0xffff;
3243 eflags = helper_cc_compute_all(CC_OP);
3244 if ((selector & 0xfffc) == 0)
3245 goto fail;
3246 if (load_segment(&e1, &e2, selector) != 0)
3247 goto fail;
3248 rpl = selector & 3;
3249 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3250 cpl = env->hflags & HF_CPL_MASK;
3251 if (e2 & DESC_S_MASK) {
3252 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3253 /* conforming */
3254 } else {
3255 if (dpl < cpl || dpl < rpl)
3256 goto fail;
3257 }
3258 } else {
3259 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3260 switch(type) {
3261 case 1:
3262 case 2:
3263 case 3:
3264 case 9:
3265 case 11:
3266 break;
3267 default:
3268 goto fail;
3269 }
3270 if (dpl < cpl || dpl < rpl) {
3271 fail:
3272 CC_SRC = eflags & ~CC_Z;
3273 return 0;
3274 }
3275 }
3276 limit = get_seg_limit(e1, e2);
3277 CC_SRC = eflags | CC_Z;
3278 return limit;
3279}
3280
3281target_ulong helper_lar(target_ulong selector1)
3282{
3283 uint32_t e1, e2, eflags, selector;
3284 int rpl, dpl, cpl, type;
3285
3286 selector = selector1 & 0xffff;
3287 eflags = helper_cc_compute_all(CC_OP);
3288 if ((selector & 0xfffc) == 0)
3289 goto fail;
3290 if (load_segment(&e1, &e2, selector) != 0)
3291 goto fail;
3292 rpl = selector & 3;
3293 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3294 cpl = env->hflags & HF_CPL_MASK;
3295 if (e2 & DESC_S_MASK) {
3296 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3297 /* conforming */
3298 } else {
3299 if (dpl < cpl || dpl < rpl)
3300 goto fail;
3301 }
3302 } else {
3303 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3304 switch(type) {
3305 case 1:
3306 case 2:
3307 case 3:
3308 case 4:
3309 case 5:
3310 case 9:
3311 case 11:
3312 case 12:
3313 break;
3314 default:
3315 goto fail;
3316 }
3317 if (dpl < cpl || dpl < rpl) {
3318 fail:
3319 CC_SRC = eflags & ~CC_Z;
3320 return 0;
3321 }
3322 }
3323 CC_SRC = eflags | CC_Z;
3324 return e2 & 0x00f0ff00;
3325}
3326
3327void helper_verr(target_ulong selector1)
3328{
3329 uint32_t e1, e2, eflags, selector;
3330 int rpl, dpl, cpl;
3331
3332 selector = selector1 & 0xffff;
3333 eflags = helper_cc_compute_all(CC_OP);
3334 if ((selector & 0xfffc) == 0)
3335 goto fail;
3336 if (load_segment(&e1, &e2, selector) != 0)
3337 goto fail;
3338 if (!(e2 & DESC_S_MASK))
3339 goto fail;
3340 rpl = selector & 3;
3341 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3342 cpl = env->hflags & HF_CPL_MASK;
3343 if (e2 & DESC_CS_MASK) {
3344 if (!(e2 & DESC_R_MASK))
3345 goto fail;
3346 if (!(e2 & DESC_C_MASK)) {
3347 if (dpl < cpl || dpl < rpl)
3348 goto fail;
3349 }
3350 } else {
3351 if (dpl < cpl || dpl < rpl) {
3352 fail:
3353 CC_SRC = eflags & ~CC_Z;
3354 return;
3355 }
3356 }
3357 CC_SRC = eflags | CC_Z;
3358}
3359
3360void helper_verw(target_ulong selector1)
3361{
3362 uint32_t e1, e2, eflags, selector;
3363 int rpl, dpl, cpl;
3364
3365 selector = selector1 & 0xffff;
3366 eflags = helper_cc_compute_all(CC_OP);
3367 if ((selector & 0xfffc) == 0)
3368 goto fail;
3369 if (load_segment(&e1, &e2, selector) != 0)
3370 goto fail;
3371 if (!(e2 & DESC_S_MASK))
3372 goto fail;
3373 rpl = selector & 3;
3374 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3375 cpl = env->hflags & HF_CPL_MASK;
3376 if (e2 & DESC_CS_MASK) {
3377 goto fail;
3378 } else {
3379 if (dpl < cpl || dpl < rpl)
3380 goto fail;
3381 if (!(e2 & DESC_W_MASK)) {
3382 fail:
3383 CC_SRC = eflags & ~CC_Z;
3384 return;
3385 }
3386 }
3387 CC_SRC = eflags | CC_Z;
3388}
3389
3390/* x87 FPU helpers */
3391
3392static void fpu_set_exception(int mask)
3393{
3394 env->fpus |= mask;
3395 if (env->fpus & (~env->fpuc & FPUC_EM))
3396 env->fpus |= FPUS_SE | FPUS_B;
3397}
3398
3399static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3400{
3401 if (b == 0.0)
3402 fpu_set_exception(FPUS_ZE);
3403 return a / b;
3404}
3405
3406static void fpu_raise_exception(void)
3407{
3408 if (env->cr[0] & CR0_NE_MASK) {
3409 raise_exception(EXCP10_COPR);
3410 }
3411#if !defined(CONFIG_USER_ONLY)
3412 else {
3413 cpu_set_ferr(env);
3414 }
3415#endif
3416}
3417
3418void helper_flds_FT0(uint32_t val)
3419{
3420 union {
3421 float32 f;
3422 uint32_t i;
3423 } u;
3424 u.i = val;
3425 FT0 = float32_to_floatx(u.f, &env->fp_status);
3426}
3427
3428void helper_fldl_FT0(uint64_t val)
3429{
3430 union {
3431 float64 f;
3432 uint64_t i;
3433 } u;
3434 u.i = val;
3435 FT0 = float64_to_floatx(u.f, &env->fp_status);
3436}
3437
3438void helper_fildl_FT0(int32_t val)
3439{
3440 FT0 = int32_to_floatx(val, &env->fp_status);
3441}
3442
3443void helper_flds_ST0(uint32_t val)
3444{
3445 int new_fpstt;
3446 union {
3447 float32 f;
3448 uint32_t i;
3449 } u;
3450 new_fpstt = (env->fpstt - 1) & 7;
3451 u.i = val;
3452 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3453 env->fpstt = new_fpstt;
3454 env->fptags[new_fpstt] = 0; /* validate stack entry */
3455}
3456
3457void helper_fldl_ST0(uint64_t val)
3458{
3459 int new_fpstt;
3460 union {
3461 float64 f;
3462 uint64_t i;
3463 } u;
3464 new_fpstt = (env->fpstt - 1) & 7;
3465 u.i = val;
3466 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3467 env->fpstt = new_fpstt;
3468 env->fptags[new_fpstt] = 0; /* validate stack entry */
3469}
3470
3471void helper_fildl_ST0(int32_t val)
3472{
3473 int new_fpstt;
3474 new_fpstt = (env->fpstt - 1) & 7;
3475 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3476 env->fpstt = new_fpstt;
3477 env->fptags[new_fpstt] = 0; /* validate stack entry */
3478}
3479
3480void helper_fildll_ST0(int64_t val)
3481{
3482 int new_fpstt;
3483 new_fpstt = (env->fpstt - 1) & 7;
3484 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3485 env->fpstt = new_fpstt;
3486 env->fptags[new_fpstt] = 0; /* validate stack entry */
3487}
3488
3489uint32_t helper_fsts_ST0(void)
3490{
3491 union {
3492 float32 f;
3493 uint32_t i;
3494 } u;
3495 u.f = floatx_to_float32(ST0, &env->fp_status);
3496 return u.i;
3497}
3498
3499uint64_t helper_fstl_ST0(void)
3500{
3501 union {
3502 float64 f;
3503 uint64_t i;
3504 } u;
3505 u.f = floatx_to_float64(ST0, &env->fp_status);
3506 return u.i;
3507}
3508
3509int32_t helper_fist_ST0(void)
3510{
3511 int32_t val;
3512 val = floatx_to_int32(ST0, &env->fp_status);
3513 if (val != (int16_t)val)
3514 val = -32768;
3515 return val;
3516}
3517
3518int32_t helper_fistl_ST0(void)
3519{
3520 int32_t val;
3521 val = floatx_to_int32(ST0, &env->fp_status);
3522 return val;
3523}
3524
3525int64_t helper_fistll_ST0(void)
3526{
3527 int64_t val;
3528 val = floatx_to_int64(ST0, &env->fp_status);
3529 return val;
3530}
3531
3532int32_t helper_fistt_ST0(void)
3533{
3534 int32_t val;
3535 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3536 if (val != (int16_t)val)
3537 val = -32768;
3538 return val;
3539}
3540
3541int32_t helper_fisttl_ST0(void)
3542{
3543 int32_t val;
3544 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3545 return val;
3546}
3547
3548int64_t helper_fisttll_ST0(void)
3549{
3550 int64_t val;
3551 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3552 return val;
3553}
3554
3555void helper_fldt_ST0(target_ulong ptr)
3556{
3557 int new_fpstt;
3558 new_fpstt = (env->fpstt - 1) & 7;
3559 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3560 env->fpstt = new_fpstt;
3561 env->fptags[new_fpstt] = 0; /* validate stack entry */
3562}
3563
3564void helper_fstt_ST0(target_ulong ptr)
3565{
3566 helper_fstt(ST0, ptr);
3567}
3568
3569void helper_fpush(void)
3570{
3571 fpush();
3572}
3573
3574void helper_fpop(void)
3575{
3576 fpop();
3577}
3578
3579void helper_fdecstp(void)
3580{
3581 env->fpstt = (env->fpstt - 1) & 7;
3582 env->fpus &= (~0x4700);
3583}
3584
3585void helper_fincstp(void)
3586{
3587 env->fpstt = (env->fpstt + 1) & 7;
3588 env->fpus &= (~0x4700);
3589}
3590
3591/* FPU move */
3592
3593void helper_ffree_STN(int st_index)
3594{
3595 env->fptags[(env->fpstt + st_index) & 7] = 1;
3596}
3597
3598void helper_fmov_ST0_FT0(void)
3599{
3600 ST0 = FT0;
3601}
3602
3603void helper_fmov_FT0_STN(int st_index)
3604{
3605 FT0 = ST(st_index);
3606}
3607
3608void helper_fmov_ST0_STN(int st_index)
3609{
3610 ST0 = ST(st_index);
3611}
3612
3613void helper_fmov_STN_ST0(int st_index)
3614{
3615 ST(st_index) = ST0;
3616}
3617
3618void helper_fxchg_ST0_STN(int st_index)
3619{
3620 CPU86_LDouble tmp;
3621 tmp = ST(st_index);
3622 ST(st_index) = ST0;
3623 ST0 = tmp;
3624}
3625
3626/* FPU operations */
3627
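/* Map the softfloat comparison result (less = -1, equal = 0,
   greater = 1, unordered = 2; indexed as ret + 1) onto the FPU
   condition codes: C0, C3, none, and C3|C2|C0 respectively. */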
3628static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3629
3630void helper_fcom_ST0_FT0(void)
3631{
3632 int ret;
3633
3634 ret = floatx_compare(ST0, FT0, &env->fp_status);
3635 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3636}
3637
3638void helper_fucom_ST0_FT0(void)
3639{
3640 int ret;
3641
3642 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3642 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3644}
3645
3646static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3647
3648void helper_fcomi_ST0_FT0(void)
3649{
3650 int eflags;
3651 int ret;
3652
3653 ret = floatx_compare(ST0, FT0, &env->fp_status);
3654 eflags = helper_cc_compute_all(CC_OP);
3655 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3656 CC_SRC = eflags;
3657}
3658
3659void helper_fucomi_ST0_FT0(void)
3660{
3661 int eflags;
3662 int ret;
3663
3664 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3665 eflags = helper_cc_compute_all(CC_OP);
3666 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3667 CC_SRC = eflags;
3668}
3669
3670void helper_fadd_ST0_FT0(void)
3671{
3672 ST0 += FT0;
3673}
3674
3675void helper_fmul_ST0_FT0(void)
3676{
3677 ST0 *= FT0;
3678}
3679
3680void helper_fsub_ST0_FT0(void)
3681{
3682 ST0 -= FT0;
3683}
3684
3685void helper_fsubr_ST0_FT0(void)
3686{
3687 ST0 = FT0 - ST0;
3688}
3689
3690void helper_fdiv_ST0_FT0(void)
3691{
3692 ST0 = helper_fdiv(ST0, FT0);
3693}
3694
3695void helper_fdivr_ST0_FT0(void)
3696{
3697 ST0 = helper_fdiv(FT0, ST0);
3698}
3699
3700/* fp operations between STN and ST0 */
3701
3702void helper_fadd_STN_ST0(int st_index)
3703{
3704 ST(st_index) += ST0;
3705}
3706
3707void helper_fmul_STN_ST0(int st_index)
3708{
3709 ST(st_index) *= ST0;
3710}
3711
3712void helper_fsub_STN_ST0(int st_index)
3713{
3714 ST(st_index) -= ST0;
3715}
3716
3717void helper_fsubr_STN_ST0(int st_index)
3718{
3719 CPU86_LDouble *p;
3720 p = &ST(st_index);
3721 *p = ST0 - *p;
3722}
3723
3724void helper_fdiv_STN_ST0(int st_index)
3725{
3726 CPU86_LDouble *p;
3727 p = &ST(st_index);
3728 *p = helper_fdiv(*p, ST0);
3729}
3730
3731void helper_fdivr_STN_ST0(int st_index)
3732{
3733 CPU86_LDouble *p;
3734 p = &ST(st_index);
3735 *p = helper_fdiv(ST0, *p);
3736}
3737
3738/* misc FPU operations */
3739void helper_fchs_ST0(void)
3740{
3741 ST0 = floatx_chs(ST0);
3742}
3743
3744void helper_fabs_ST0(void)
3745{
3746 ST0 = floatx_abs(ST0);
3747}
3748
3749void helper_fld1_ST0(void)
3750{
3751 ST0 = f15rk[1];
3752}
3753
3754void helper_fldl2t_ST0(void)
3755{
3756 ST0 = f15rk[6];
3757}
3758
3759void helper_fldl2e_ST0(void)
3760{
3761 ST0 = f15rk[5];
3762}
3763
3764void helper_fldpi_ST0(void)
3765{
3766 ST0 = f15rk[2];
3767}
3768
3769void helper_fldlg2_ST0(void)
3770{
3771 ST0 = f15rk[3];
3772}
3773
3774void helper_fldln2_ST0(void)
3775{
3776 ST0 = f15rk[4];
3777}
3778
3779void helper_fldz_ST0(void)
3780{
3781 ST0 = f15rk[0];
3782}
3783
3784void helper_fldz_FT0(void)
3785{
3786 FT0 = f15rk[0];
3787}
3788
3789uint32_t helper_fnstsw(void)
3790{
3791 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3792}
3793
3794uint32_t helper_fnstcw(void)
3795{
3796 return env->fpuc;
3797}
3798
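/* Propagate the FPU control word into softfloat: the RC field selects
   the rounding mode and, when extended precision is available, the PC
   field selects 32/64/80-bit rounding precision. */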
3799static void update_fp_status(void)
3800{
3801 int rnd_type;
3802
3803 /* set rounding mode */
3804 switch(env->fpuc & RC_MASK) {
3805 default:
3806 case RC_NEAR:
3807 rnd_type = float_round_nearest_even;
3808 break;
3809 case RC_DOWN:
3810 rnd_type = float_round_down;
3811 break;
3812 case RC_UP:
3813 rnd_type = float_round_up;
3814 break;
3815 case RC_CHOP:
3816 rnd_type = float_round_to_zero;
3817 break;
3818 }
3819 set_float_rounding_mode(rnd_type, &env->fp_status);
3820#ifdef FLOATX80
3821 switch((env->fpuc >> 8) & 3) {
3822 case 0:
3823 rnd_type = 32;
3824 break;
3825 case 2:
3826 rnd_type = 64;
3827 break;
3828 case 3:
3829 default:
3830 rnd_type = 80;
3831 break;
3832 }
3833 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3834#endif
3835}
3836
3837void helper_fldcw(uint32_t val)
3838{
3839 env->fpuc = val;
3840 update_fp_status();
3841}
3842
3843void helper_fclex(void)
3844{
3845 env->fpus &= 0x7f00;
3846}
3847
3848void helper_fwait(void)
3849{
3850 if (env->fpus & FPUS_SE)
3851 fpu_raise_exception();
3852}
3853
3854void helper_fninit(void)
3855{
3856 env->fpus = 0;
3857 env->fpstt = 0;
3858 env->fpuc = 0x37f;
3859 env->fptags[0] = 1;
3860 env->fptags[1] = 1;
3861 env->fptags[2] = 1;
3862 env->fptags[3] = 1;
3863 env->fptags[4] = 1;
3864 env->fptags[5] = 1;
3865 env->fptags[6] = 1;
3866 env->fptags[7] = 1;
3867}
3868
3869/* BCD ops */
3870
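/* FBLD/FBST use the 80-bit packed-BCD format: bytes 0-8 hold 18 BCD
   digits, two per byte with the low digit in the low nibble, and bit 7
   of byte 9 carries the sign. */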
3871void helper_fbld_ST0(target_ulong ptr)
3872{
3873 CPU86_LDouble tmp;
3874 uint64_t val;
3875 unsigned int v;
3876 int i;
3877
3878 val = 0;
3879 for(i = 8; i >= 0; i--) {
3880 v = ldub(ptr + i);
3881 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3882 }
3883 tmp = val;
3884 if (ldub(ptr + 9) & 0x80)
3885 tmp = -tmp;
3886 fpush();
3887 ST0 = tmp;
3888}
3889
3890void helper_fbst_ST0(target_ulong ptr)
3891{
3892 int v;
3893 target_ulong mem_ref, mem_end;
3894 int64_t val;
3895
3896 val = floatx_to_int64(ST0, &env->fp_status);
3897 mem_ref = ptr;
3898 mem_end = mem_ref + 9;
3899 if (val < 0) {
3900 stb(mem_end, 0x80);
3901 val = -val;
3902 } else {
3903 stb(mem_end, 0x00);
3904 }
3905 while (mem_ref < mem_end) {
3906 if (val == 0)
3907 break;
3908 v = val % 100;
3909 val = val / 100;
3910 v = ((v / 10) << 4) | (v % 10);
3911 stb(mem_ref++, v);
3912 }
3913 while (mem_ref < mem_end) {
3914 stb(mem_ref++, 0);
3915 }
3916}
3917
3918void helper_f2xm1(void)
3919{
3920 ST0 = pow(2.0,ST0) - 1.0;
3921}
3922
3923void helper_fyl2x(void)
3924{
3925 CPU86_LDouble fptemp;
3926
3927 fptemp = ST0;
3928 if (fptemp>0.0){
3929 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3930 ST1 *= fptemp;
3931 fpop();
3932 } else {
3933 env->fpus &= (~0x4700);
3934 env->fpus |= 0x400;
3935 }
3936}
3937
3938void helper_fptan(void)
3939{
3940 CPU86_LDouble fptemp;
3941
3942 fptemp = ST0;
3943 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3944 env->fpus |= 0x400;
3945 } else {
3946 ST0 = tan(fptemp);
3947 fpush();
3948 ST0 = 1.0;
3949 env->fpus &= (~0x400); /* C2 <-- 0 */
3950 /* the above code is for |arg| < 2**52 only */
3951 }
3952}
3953
3954void helper_fpatan(void)
3955{
3956 CPU86_LDouble fptemp, fpsrcop;
3957
3958 fpsrcop = ST1;
3959 fptemp = ST0;
3960 ST1 = atan2(fpsrcop,fptemp);
3961 fpop();
3962}
3963
3964void helper_fxtract(void)
3965{
3966 CPU86_LDoubleU temp;
3967 unsigned int expdif;
3968
3969 temp.d = ST0;
3970 expdif = EXPD(temp) - EXPBIAS;
3971 /*DP exponent bias*/
3972 ST0 = expdif;
3973 fpush();
3974 BIASEXPONENT(temp);
3975 ST0 = temp.d;
3976}
3977
3978void helper_fprem1(void)
3979{
3980 CPU86_LDouble dblq, fpsrcop, fptemp;
3981 CPU86_LDoubleU fpsrcop1, fptemp1;
3982 int expdif;
3983 signed long long int q;
3984
3985 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3986 ST0 = 0.0 / 0.0; /* NaN */
3987 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3988 return;
3989 }
3990
3991 fpsrcop = ST0;
3992 fptemp = ST1;
3993 fpsrcop1.d = fpsrcop;
3994 fptemp1.d = fptemp;
3995 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3996
3997 if (expdif < 0) {
3998 /* optimisation? taken from the AMD docs */
3999 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4000 /* ST0 is unchanged */
4001 return;
4002 }
4003
4004 if (expdif < 53) {
4005 dblq = fpsrcop / fptemp;
4006 /* round dblq towards nearest integer */
4007 dblq = rint(dblq);
4008 ST0 = fpsrcop - fptemp * dblq;
4009
4010 /* convert dblq to q by truncating towards zero */
4011 if (dblq < 0.0)
4012 q = (signed long long int)(-dblq);
4013 else
4014 q = (signed long long int)dblq;
4015
4016 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4017 /* (C0,C3,C1) <-- (q2,q1,q0) */
4018 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4019 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4020 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4021 } else {
4022 env->fpus |= 0x400; /* C2 <-- 1 */
4023 fptemp = pow(2.0, expdif - 50);
4024 fpsrcop = (ST0 / ST1) / fptemp;
4025 /* fpsrcop = integer obtained by chopping */
4026 fpsrcop = (fpsrcop < 0.0) ?
4027 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4028 ST0 -= (ST1 * fpsrcop * fptemp);
4029 }
4030}
4031
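/* FPREM computes a partial remainder with the quotient truncated
   toward zero (FPREM1 above rounds it to nearest). When the exponents
   differ by 53 or more, only a partial reduction is performed and C2
   is set so software knows to iterate. */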
4032void helper_fprem(void)
4033{
4034 CPU86_LDouble dblq, fpsrcop, fptemp;
4035 CPU86_LDoubleU fpsrcop1, fptemp1;
4036 int expdif;
4037 signed long long int q;
4038
4039 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4040 ST0 = 0.0 / 0.0; /* NaN */
4041 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4042 return;
4043 }
4044
4045 fpsrcop = (CPU86_LDouble)ST0;
4046 fptemp = (CPU86_LDouble)ST1;
4047 fpsrcop1.d = fpsrcop;
4048 fptemp1.d = fptemp;
4049 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4050
4051 if (expdif < 0) {
4052 /* optimisation? taken from the AMD docs */
4053 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4054 /* ST0 is unchanged */
4055 return;
4056 }
4057
4058 if ( expdif < 53 ) {
4059 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4060 /* round dblq towards zero */
4061 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4062 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4063
4064 /* convert dblq to q by truncating towards zero */
4065 if (dblq < 0.0)
4066 q = (signed long long int)(-dblq);
4067 else
4068 q = (signed long long int)dblq;
4069
4070 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4071 /* (C0,C3,C1) <-- (q2,q1,q0) */
4072 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4073 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4074 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4075 } else {
4076 int N = 32 + (expdif % 32); /* as per AMD docs */
4077 env->fpus |= 0x400; /* C2 <-- 1 */
4078 fptemp = pow(2.0, (double)(expdif - N));
4079 fpsrcop = (ST0 / ST1) / fptemp;
4080 /* fpsrcop = integer obtained by chopping */
4081 fpsrcop = (fpsrcop < 0.0) ?
4082 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4083 ST0 -= (ST1 * fpsrcop * fptemp);
4084 }
4085}
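/* Worked example of the quotient-bit encoding shared by fprem1 and fprem
 * above (a sketch, values assumed): for ST0 = 17.0, ST1 = 5.0, fprem
 * chops 17/5 to q = 3 (binary 011) and leaves ST0 = 2.0, so
 *     C0 <- q2 = 0, C3 <- q1 = 1, C1 <- q0 = 1
 * i.e. fpus gains 0x4000 (C3) and 0x200 (C1), with C2 = 0 signalling
 * that the reduction is complete. */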
4086
4087void helper_fyl2xp1(void)
4088{
4089 CPU86_LDouble fptemp;
4090
4091 fptemp = ST0;
4092 if ((fptemp+1.0)>0.0) {
4093 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4094 ST1 *= fptemp;
4095 fpop();
4096 } else {
4097 env->fpus &= (~0x4700);
4098 env->fpus |= 0x400;
4099 }
4100}
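/* Numeric sketch (values assumed): with ST1 = 2.0 and ST0 = 1.0,
 * fyl2xp1 computes 2.0 * log2(1.0 + 1.0) = 2.0 and pops, so 2.0 ends
 * up in ST0; log(x)/log(2.0) stands in for log2() above. */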
4101
4102void helper_fsqrt(void)
4103{
4104 CPU86_LDouble fptemp;
4105
4106 fptemp = ST0;
4107 if (fptemp<0.0) {
4108 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4109 env->fpus |= 0x400;
4110 }
4111 ST0 = sqrt(fptemp);
4112}
4113
4114void helper_fsincos(void)
4115{
4116 CPU86_LDouble fptemp;
4117
4118 fptemp = ST0;
4119 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4120 env->fpus |= 0x400;
4121 } else {
4122 ST0 = sin(fptemp);
4123 fpush();
4124 ST0 = cos(fptemp);
4125 env->fpus &= (~0x400); /* C2 <-- 0 */
4126 /* the above code is for |arg| < 2**63 only */
4127 }
4128}
4129
4130void helper_frndint(void)
4131{
4132 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4133}
4134
4135void helper_fscale(void)
4136{
4137 ST0 = ldexp (ST0, (int)(ST1));
4138}
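/* Usage sketch (values assumed): with ST1 = 3.0 and ST0 = 1.5, fscale
 * yields ldexp(1.5, 3) = 12.0.  The (int) cast truncates ST1 towards
 * zero, matching the hardware's use of the integer part of ST1 only. */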
4139
4140void helper_fsin(void)
4141{
4142 CPU86_LDouble fptemp;
4143
4144 fptemp = ST0;
4145 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4146 env->fpus |= 0x400;
4147 } else {
4148 ST0 = sin(fptemp);
4149 env->fpus &= (~0x400); /* C2 <-- 0 */
4150 /* the above code is for |arg| < 2**53 only */
4151 }
4152}
4153
4154void helper_fcos(void)
4155{
4156 CPU86_LDouble fptemp;
4157
4158 fptemp = ST0;
4159 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4160 env->fpus |= 0x400;
4161 } else {
4162 ST0 = cos(fptemp);
4163 env->fpus &= (~0x400); /* C2 <-- 0 */
4164 /* the above code is for |arg| < 2**63 only */
4165 }
4166}
4167
4168void helper_fxam_ST0(void)
4169{
4170 CPU86_LDoubleU temp;
4171 int expdif;
4172
4173 temp.d = ST0;
4174
4175 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4176 if (SIGND(temp))
4177 env->fpus |= 0x200; /* C1 <-- 1 */
4178
4179 /* XXX: test fptags too */
4180 expdif = EXPD(temp);
4181 if (expdif == MAXEXPD) {
4182#ifdef USE_X86LDOUBLE
4183 if (MANTD(temp) == 0x8000000000000000ULL)
4184#else
4185 if (MANTD(temp) == 0)
4186#endif
4187 env->fpus |= 0x500 /*Infinity*/;
4188 else
4189 env->fpus |= 0x100 /*NaN*/;
4190 } else if (expdif == 0) {
4191 if (MANTD(temp) == 0)
4192 env->fpus |= 0x4000 /*Zero*/;
4193 else
4194 env->fpus |= 0x4400 /*Denormal*/;
4195 } else {
4196 env->fpus |= 0x400;
4197 }
4198}
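/* Summary of the condition-code patterns set above (C3, C2 and C0 are
 * the fpus bits 0x4000, 0x400 and 0x100; C1 = 0x200 carries the sign):
 *     NaN      -> C0     (0x100)
 *     Infinity -> C2|C0  (0x500)
 *     Zero     -> C3     (0x4000)
 *     Denormal -> C3|C2  (0x4400)
 *     Normal   -> C2     (0x400)
 */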
4199
4200void helper_fstenv(target_ulong ptr, int data32)
4201{
4202 int fpus, fptag, exp, i;
4203 uint64_t mant;
4204 CPU86_LDoubleU tmp;
4205
4206 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4207 fptag = 0;
4208 for (i=7; i>=0; i--) {
4209 fptag <<= 2;
4210 if (env->fptags[i]) {
4211 fptag |= 3;
4212 } else {
4213 tmp.d = env->fpregs[i].d;
4214 exp = EXPD(tmp);
4215 mant = MANTD(tmp);
4216 if (exp == 0 && mant == 0) {
4217 /* zero */
4218 fptag |= 1;
4219 } else if (exp == 0 || exp == MAXEXPD
4220#ifdef USE_X86LDOUBLE
4221 || (mant & (1LL << 63)) == 0
4222#endif
4223 ) {
4224 /* NaNs, infinity, denormal */
4225 fptag |= 2;
4226 }
4227 }
4228 }
4229 if (data32) {
4230 /* 32 bit */
4231 stl(ptr, env->fpuc);
4232 stl(ptr + 4, fpus);
4233 stl(ptr + 8, fptag);
4234 stl(ptr + 12, 0); /* fpip */
4235 stl(ptr + 16, 0); /* fpcs */
4236 stl(ptr + 20, 0); /* fpoo */
4237 stl(ptr + 24, 0); /* fpos */
4238 } else {
4239 /* 16 bit */
4240 stw(ptr, env->fpuc);
4241 stw(ptr + 2, fpus);
4242 stw(ptr + 4, fptag);
4243 stw(ptr + 6, 0);
4244 stw(ptr + 8, 0);
4245 stw(ptr + 10, 0);
4246 stw(ptr + 12, 0);
4247 }
4248}
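/* The two-bit tags packed above follow the x87 tag-word encoding:
 * 00 = valid, 01 = zero, 10 = special (NaN/infinity/denormal),
 * 11 = empty.  As an illustrative check: with only physical register 0
 * holding 0.0 and the other seven slots empty, the loop yields
 * fptag = 0xfffd. */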
4249
4250void helper_fldenv(target_ulong ptr, int data32)
4251{
4252 int i, fpus, fptag;
4253
4254 if (data32) {
4255 env->fpuc = lduw(ptr);
4256 fpus = lduw(ptr + 4);
4257 fptag = lduw(ptr + 8);
4258 }
4259 else {
4260 env->fpuc = lduw(ptr);
4261 fpus = lduw(ptr + 2);
4262 fptag = lduw(ptr + 4);
4263 }
4264 env->fpstt = (fpus >> 11) & 7;
4265 env->fpus = fpus & ~0x3800;
4266 for(i = 0;i < 8; i++) {
4267 env->fptags[i] = ((fptag & 3) == 3);
4268 fptag >>= 2;
4269 }
4270}
4271
4272void helper_fsave(target_ulong ptr, int data32)
4273{
4274 CPU86_LDouble tmp;
4275 int i;
4276
4277 helper_fstenv(ptr, data32);
4278
4279 ptr += (14 << data32);
4280 for(i = 0;i < 8; i++) {
4281 tmp = ST(i);
4282 helper_fstt(tmp, ptr);
4283 ptr += 10;
4284 }
4285
4286 /* fninit */
4287 env->fpus = 0;
4288 env->fpstt = 0;
4289 env->fpuc = 0x37f;
4290 env->fptags[0] = 1;
4291 env->fptags[1] = 1;
4292 env->fptags[2] = 1;
4293 env->fptags[3] = 1;
4294 env->fptags[4] = 1;
4295 env->fptags[5] = 1;
4296 env->fptags[6] = 1;
4297 env->fptags[7] = 1;
4298}
4299
4300void helper_frstor(target_ulong ptr, int data32)
4301{
4302 CPU86_LDouble tmp;
4303 int i;
4304
4305 helper_fldenv(ptr, data32);
4306 ptr += (14 << data32);
4307
4308 for(i = 0;i < 8; i++) {
4309 tmp = helper_fldt(ptr);
4310 ST(i) = tmp;
4311 ptr += 10;
4312 }
4313}
4314
4315void helper_fxsave(target_ulong ptr, int data64)
4316{
4317 int fpus, fptag, i, nb_xmm_regs;
4318 CPU86_LDouble tmp;
4319 target_ulong addr;
4320
4321 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4322 fptag = 0;
4323 for(i = 0; i < 8; i++) {
4324 fptag |= (env->fptags[i] << i);
4325 }
4326 stw(ptr, env->fpuc);
4327 stw(ptr + 2, fpus);
4328 stw(ptr + 4, fptag ^ 0xff);
4329#ifdef TARGET_X86_64
4330 if (data64) {
4331 stq(ptr + 0x08, 0); /* rip */
4332 stq(ptr + 0x10, 0); /* rdp */
4333 } else
4334#endif
4335 {
4336 stl(ptr + 0x08, 0); /* eip */
4337 stl(ptr + 0x0c, 0); /* sel */
4338 stl(ptr + 0x10, 0); /* dp */
4339 stl(ptr + 0x14, 0); /* sel */
4340 }
4341
4342 addr = ptr + 0x20;
4343 for(i = 0;i < 8; i++) {
4344 tmp = ST(i);
4345 helper_fstt(tmp, addr);
4346 addr += 16;
4347 }
4348
4349 if (env->cr[4] & CR4_OSFXSR_MASK) {
4350 /* XXX: finish it */
4351 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4352 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4353 if (env->hflags & HF_CS64_MASK)
4354 nb_xmm_regs = 16;
4355 else
4356 nb_xmm_regs = 8;
4357 addr = ptr + 0xa0;
4358 /* Fast FXSAVE leaves out the XMM registers */
4359 if (!(env->efer & MSR_EFER_FFXSR)
4360 || (env->hflags & HF_CPL_MASK)
4361 || !(env->hflags & HF_LMA_MASK)) {
4362 for(i = 0; i < nb_xmm_regs; i++) {
4363 stq(addr, env->xmm_regs[i].XMM_Q(0));
4364 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4365 addr += 16;
4366 }
4367 }
4368 }
4369}
4370
4371void helper_fxrstor(target_ulong ptr, int data64)
4372{
4373 int i, fpus, fptag, nb_xmm_regs;
4374 CPU86_LDouble tmp;
4375 target_ulong addr;
4376
4377 env->fpuc = lduw(ptr);
4378 fpus = lduw(ptr + 2);
4379 fptag = lduw(ptr + 4);
4380 env->fpstt = (fpus >> 11) & 7;
4381 env->fpus = fpus & ~0x3800;
4382 fptag ^= 0xff;
4383 for(i = 0;i < 8; i++) {
4384 env->fptags[i] = ((fptag >> i) & 1);
4385 }
4386
4387 addr = ptr + 0x20;
4388 for(i = 0;i < 8; i++) {
4389 tmp = helper_fldt(addr);
4390 ST(i) = tmp;
4391 addr += 16;
4392 }
4393
4394 if (env->cr[4] & CR4_OSFXSR_MASK) {
4395 /* XXX: finish it */
4396 env->mxcsr = ldl(ptr + 0x18);
4397 //ldl(ptr + 0x1c);
4398 if (env->hflags & HF_CS64_MASK)
4399 nb_xmm_regs = 16;
4400 else
4401 nb_xmm_regs = 8;
4402 addr = ptr + 0xa0;
4403 /* Fast FXRSTOR leaves out the XMM registers */
4404 if (!(env->efer & MSR_EFER_FFXSR)
4405 || (env->hflags & HF_CPL_MASK)
4406 || !(env->hflags & HF_LMA_MASK)) {
4407 for(i = 0; i < nb_xmm_regs; i++) {
4408 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4409 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4410 addr += 16;
4411 }
4412 }
4413 }
4414}
4415
4416#ifndef USE_X86LDOUBLE
4417
4418void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4419{
4420 CPU86_LDoubleU temp;
4421 int e;
4422
4423 temp.d = f;
4424 /* mantissa */
4425 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4426 /* exponent + sign */
4427 e = EXPD(temp) - EXPBIAS + 16383;
4428 e |= SIGND(temp) >> 16;
4429 *pexp = e;
4430}
4431
4432CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4433{
4434 CPU86_LDoubleU temp;
4435 int e;
4436 uint64_t ll;
4437
4438 /* XXX: handle overflow ? */
4439 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4440 e |= (upper >> 4) & 0x800; /* sign */
4441 ll = (mant >> 11) & ((1LL << 52) - 1);
4442#ifdef __arm__
4443 temp.l.upper = (e << 20) | (ll >> 32);
4444 temp.l.lower = ll;
4445#else
4446 temp.ll = ll | ((uint64_t)e << 52);
4447#endif
4448 return temp.d;
4449}
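/* Round-trip sketch for this double-based build (values assumed):
 * cpu_get_fp80(&mant, &exp, 1.0) yields mant = 0x8000000000000000
 * (the explicit integer bit) and exp = 0x3fff (the 80-bit bias);
 * cpu_set_fp80(mant, exp) re-biases to EXPBIAS, masks off the integer
 * bit and returns 1.0. */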
4450
4451#else
4452
4453void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4454{
4455 CPU86_LDoubleU temp;
4456
4457 temp.d = f;
4458 *pmant = temp.l.lower;
4459 *pexp = temp.l.upper;
4460}
4461
4462CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4463{
4464 CPU86_LDoubleU temp;
4465
4466 temp.l.upper = upper;
4467 temp.l.lower = mant;
4468 return temp.d;
4469}
4470#endif
4471
4472#ifdef TARGET_X86_64
4473
4474//#define DEBUG_MULDIV
4475
4476static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4477{
4478 *plow += a;
4479 /* carry test */
4480 if (*plow < a)
4481 (*phigh)++;
4482 *phigh += b;
4483}
4484
4485static void neg128(uint64_t *plow, uint64_t *phigh)
4486{
4487 *plow = ~ *plow;
4488 *phigh = ~ *phigh;
4489 add128(plow, phigh, 1, 0);
4490}
4491
4492/* return TRUE if overflow */
4493static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4494{
4495 uint64_t q, r, a1, a0;
4496 int i, qb, ab;
4497
4498 a0 = *plow;
4499 a1 = *phigh;
4500 if (a1 == 0) {
4501 q = a0 / b;
4502 r = a0 % b;
4503 *plow = q;
4504 *phigh = r;
4505 } else {
4506 if (a1 >= b)
4507 return 1;
4508 /* XXX: use a better algorithm */
4509 for(i = 0; i < 64; i++) {
4510 ab = a1 >> 63;
4511 a1 = (a1 << 1) | (a0 >> 63);
4512 if (ab || a1 >= b) {
4513 a1 -= b;
4514 qb = 1;
4515 } else {
4516 qb = 0;
4517 }
4518 a0 = (a0 << 1) | qb;
4519 }
4520#if defined(DEBUG_MULDIV)
4521 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4522 *phigh, *plow, b, a0, a1);
4523#endif
4524 *plow = a0;
4525 *phigh = a1;
4526 }
4527 return 0;
4528}
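/* Usage sketch (operands assumed): dividing 2^64 (plow = 0, phigh = 1)
 * by 3 leaves *plow = 0x5555555555555555 (quotient) and *phigh = 1
 * (remainder) and returns 0; when the quotient cannot fit in 64 bits
 * (a1 >= b on entry) it returns 1 so the caller can raise #DE, e.g.:
 *
 *     uint64_t lo = 0, hi = 1;
 *     if (div64(&lo, &hi, 3))
 *         raise_exception(EXCP00_DIVZ);
 */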
4529
4530/* return TRUE if overflow */
4531static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4532{
4533 int sa, sb;
4534 sa = ((int64_t)*phigh < 0);
4535 if (sa)
4536 neg128(plow, phigh);
4537 sb = (b < 0);
4538 if (sb)
4539 b = -b;
4540 if (div64(plow, phigh, b) != 0)
4541 return 1;
4542 if (sa ^ sb) {
4543 if (*plow > (1ULL << 63))
4544 return 1;
4545 *plow = - *plow;
4546 } else {
4547 if (*plow >= (1ULL << 63))
4548 return 1;
4549 }
4550 if (sa)
4551 *phigh = - *phigh;
4552 return 0;
4553}
4554
4555void helper_mulq_EAX_T0(target_ulong t0)
4556{
4557 uint64_t r0, r1;
4558
4559 mulu64(&r0, &r1, EAX, t0);
4560 EAX = r0;
4561 EDX = r1;
4562 CC_DST = r0;
4563 CC_SRC = r1;
4564}
4565
4566void helper_imulq_EAX_T0(target_ulong t0)
4567{
4568 uint64_t r0, r1;
4569
4570 muls64(&r0, &r1, EAX, t0);
4571 EAX = r0;
4572 EDX = r1;
4573 CC_DST = r0;
4574 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4575}
4576
4577target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4578{
4579 uint64_t r0, r1;
4580
4581 muls64(&r0, &r1, t0, t1);
4582 CC_DST = r0;
4583 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4584 return r0;
4585}
4586
4587void helper_divq_EAX(target_ulong t0)
4588{
4589 uint64_t r0, r1;
4590 if (t0 == 0) {
4591 raise_exception(EXCP00_DIVZ);
4592 }
4593 r0 = EAX;
4594 r1 = EDX;
4595 if (div64(&r0, &r1, t0))
4596 raise_exception(EXCP00_DIVZ);
4597 EAX = r0;
4598 EDX = r1;
4599}
4600
4601void helper_idivq_EAX(target_ulong t0)
4602{
4603 uint64_t r0, r1;
4604 if (t0 == 0) {
4605 raise_exception(EXCP00_DIVZ);
4606 }
4607 r0 = EAX;
4608 r1 = EDX;
4609 if (idiv64(&r0, &r1, t0))
4610 raise_exception(EXCP00_DIVZ);
4611 EAX = r0;
4612 EDX = r1;
4613}
4614#endif
4615
4616static void do_hlt(void)
4617{
4618 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4619 env->halted = 1;
4620 env->exception_index = EXCP_HLT;
4621 cpu_loop_exit();
4622}
4623
4624void helper_hlt(int next_eip_addend)
4625{
4626 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4627 EIP += next_eip_addend;
4628
4629 do_hlt();
4630}
4631
4632void helper_monitor(target_ulong ptr)
4633{
4634 if ((uint32_t)ECX != 0)
4635 raise_exception(EXCP0D_GPF);
4636 /* XXX: store address ? */
4637 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4638}
4639
4640void helper_mwait(int next_eip_addend)
4641{
4642 if ((uint32_t)ECX != 0)
4643 raise_exception(EXCP0D_GPF);
4644 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4645 EIP += next_eip_addend;
4646
4647 /* XXX: not complete but not completely erroneous */
4648 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4649 /* more than one CPU: do not sleep because another CPU may
4650 wake this one */
4651 } else {
4652 do_hlt();
4653 }
4654}
4655
4656void helper_debug(void)
4657{
4658 env->exception_index = EXCP_DEBUG;
4659 cpu_loop_exit();
4660}
4661
4662void helper_raise_interrupt(int intno, int next_eip_addend)
4663{
4664 raise_interrupt(intno, 1, 0, next_eip_addend);
4665}
4666
4667void helper_raise_exception(int exception_index)
4668{
4669 raise_exception(exception_index);
4670}
4671
4672void helper_cli(void)
4673{
4674 env->eflags &= ~IF_MASK;
4675}
4676
4677void helper_sti(void)
4678{
4679 env->eflags |= IF_MASK;
4680}
4681
4682#if 0
4683/* vm86plus instructions */
4684void helper_cli_vm(void)
4685{
4686 env->eflags &= ~VIF_MASK;
4687}
4688
4689void helper_sti_vm(void)
4690{
4691 env->eflags |= VIF_MASK;
4692 if (env->eflags & VIP_MASK) {
4693 raise_exception(EXCP0D_GPF);
4694 }
4695}
4696#endif
4697
4698void helper_set_inhibit_irq(void)
4699{
4700 env->hflags |= HF_INHIBIT_IRQ_MASK;
4701}
4702
4703void helper_reset_inhibit_irq(void)
4704{
4705 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4706}
4707
4708void helper_boundw(target_ulong a0, int v)
4709{
4710 int low, high;
4711 low = ldsw(a0);
4712 high = ldsw(a0 + 2);
4713 v = (int16_t)v;
4714 if (v < low || v > high) {
4715 raise_exception(EXCP05_BOUND);
4716 }
4717}
4718
4719void helper_boundl(target_ulong a0, int v)
4720{
4721 int low, high;
4722 low = ldl(a0);
4723 high = ldl(a0 + 4);
4724 if (v < low || v > high) {
4725 raise_exception(EXCP05_BOUND);
4726 }
4727}
4728
4729static float approx_rsqrt(float a)
4730{
4731 return 1.0 / sqrt(a);
4732}
4733
4734static float approx_rcp(float a)
4735{
4736 return 1.0 / a;
4737}
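/* Note: these "approximations" are computed at full precision, whereas
 * real SSE hardware guarantees only about 12 mantissa bits of accuracy
 * for rsqrtss/rcpss, so a guest probing the low result bits may notice
 * the difference. */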
4738
4739#if !defined(CONFIG_USER_ONLY)
4740
4741#define MMUSUFFIX _mmu
4742
4743#define SHIFT 0
4744#include "softmmu_template.h"
4745
4746#define SHIFT 1
4747#include "softmmu_template.h"
4748
4749#define SHIFT 2
4750#include "softmmu_template.h"
4751
4752#define SHIFT 3
4753#include "softmmu_template.h"
4754
4755#endif
4756
4757#if !defined(CONFIG_USER_ONLY)
4758/* try to fill the TLB and return an exception if error. If retaddr is
4759 NULL, it means that the function was called in C code (i.e. not
4760 from generated code or from helper.c) */
4761/* XXX: fix it to restore all registers */
4762void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4763{
4764 TranslationBlock *tb;
4765 int ret;
4766 unsigned long pc;
4767 CPUX86State *saved_env;
4768
4769 /* XXX: hack to restore env in all cases, even if not called from
4770 generated code */
4771 saved_env = env;
4772 env = cpu_single_env;
4773
4774 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4775 if (ret) {
4776 if (retaddr) {
4777 /* now we have a real cpu fault */
4778 pc = (unsigned long)retaddr;
4779 tb = tb_find_pc(pc);
4780 if (tb) {
4781 /* the PC is inside the translated code. It means that we have
4782 a virtual CPU fault */
4783 cpu_restore_state(tb, env, pc, NULL);
4784 }
4785 }
4786 raise_exception_err(env->exception_index, env->error_code);
4787 }
4788 env = saved_env;
4789}
4790#endif
4791
4792/* Secure Virtual Machine helpers */
4793
4794#if defined(CONFIG_USER_ONLY)
4795
4796void helper_vmrun(int aflag, int next_eip_addend)
4797{
4798}
4799void helper_vmmcall(void)
4800{
4801}
4802void helper_vmload(int aflag)
4803{
4804}
4805void helper_vmsave(int aflag)
4806{
4807}
4808void helper_stgi(void)
4809{
4810}
4811void helper_clgi(void)
4812{
4813}
4814void helper_skinit(void)
4815{
4816}
4817void helper_invlpga(int aflag)
4818{
4819}
4820void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4821{
4822}
4823void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4824{
4825}
4826
4827void helper_svm_check_io(uint32_t port, uint32_t param,
4828 uint32_t next_eip_addend)
4829{
4830}
4831#else
4832
4833static inline void svm_save_seg(target_phys_addr_t addr,
4834 const SegmentCache *sc)
4835{
4836 stw_phys(addr + offsetof(struct vmcb_seg, selector),
4837 sc->selector);
4838 stq_phys(addr + offsetof(struct vmcb_seg, base),
4839 sc->base);
4840 stl_phys(addr + offsetof(struct vmcb_seg, limit),
4841 sc->limit);
4842 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
4843 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4844}
4845
4846static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4847{
4848 unsigned int flags;
4849
4850 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4851 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4852 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4853 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4854 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4855}
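/* Attrib packing sketch: the CPU's flags word keeps type/S/DPL/P in
 * bits 8..15 and AVL/L/D/G in bits 20..23; svm_save_seg folds these
 * into the VMCB's 12-bit attrib field as
 *     attrib = ((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00)
 * and svm_load_seg above applies the inverse expansion. */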
4856
4857static inline void svm_load_seg_cache(target_phys_addr_t addr,
4858 CPUState *env, int seg_reg)
4859{
4860 SegmentCache sc1, *sc = &sc1;
4861 svm_load_seg(addr, sc);
4862 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4863 sc->base, sc->limit, sc->flags);
4864}
4865
4866void helper_vmrun(int aflag, int next_eip_addend)
4867{
4868 target_ulong addr;
4869 uint32_t event_inj;
4870 uint32_t int_ctl;
4871
4872 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4873
4874 if (aflag == 2)
4875 addr = EAX;
4876 else
4877 addr = (uint32_t)EAX;
4878
4879 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4880
4881 env->vm_vmcb = addr;
4882
4883 /* save the current CPU state in the hsave page */
4884 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4885 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4886
4887 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4888 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4889
4890 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4891 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4892 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4893 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4894 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4895 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4896
4897 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4898 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4899
4900 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4901 &env->segs[R_ES]);
4902 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4903 &env->segs[R_CS]);
4904 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4905 &env->segs[R_SS]);
4906 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4907 &env->segs[R_DS]);
4908
4909 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4910 EIP + next_eip_addend);
4911 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4912 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4913
4914 /* load the interception bitmaps so we do not need to access the
4915 vmcb in svm mode */
4916 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4917 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4918 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4919 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4920 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4921 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4922
4923 /* enable intercepts */
4924 env->hflags |= HF_SVMI_MASK;
4925
4926 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4927
4928 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4929 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4930
4931 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4932 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4933
4934 /* clear exit_info_2 so we behave like the real hardware */
4935 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4936
4937 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4938 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4939 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4940 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4941 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4942 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4943 if (int_ctl & V_INTR_MASKING_MASK) {
4944 env->v_tpr = int_ctl & V_TPR_MASK;
4945 env->hflags2 |= HF2_VINTR_MASK;
4946 if (env->eflags & IF_MASK)
4947 env->hflags2 |= HF2_HIF_MASK;
4948 }
4949
4950 cpu_load_efer(env,
4951 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4952 env->eflags = 0;
4953 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4954 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4955 CC_OP = CC_OP_EFLAGS;
4956
4957 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4958 env, R_ES);
4959 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4960 env, R_CS);
4961 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4962 env, R_SS);
4963 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4964 env, R_DS);
4965
4966 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4967 env->eip = EIP;
4968 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4969 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4970 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4971 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4972 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4973
4974 /* FIXME: guest state consistency checks */
4975
4976 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4977 case TLB_CONTROL_DO_NOTHING:
4978 break;
4979 case TLB_CONTROL_FLUSH_ALL_ASID:
4980 /* FIXME: this is not 100% correct but should work for now */
4981 tlb_flush(env, 1);
4982 break;
4983 }
4984
4985 env->hflags2 |= HF2_GIF_MASK;
4986
4987 if (int_ctl & V_IRQ_MASK) {
4988 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4989 }
4990
4991 /* maybe we need to inject an event */
4992 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
4993 if (event_inj & SVM_EVTINJ_VALID) {
4994 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4995 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4996 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
4997 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
4998
4999 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
5000 /* FIXME: need to implement valid_err */
5001 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5002 case SVM_EVTINJ_TYPE_INTR:
5003 env->exception_index = vector;
5004 env->error_code = event_inj_err;
5005 env->exception_is_int = 0;
5006 env->exception_next_eip = -1;
5007 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
5008 /* XXX: is it always correct ? */
5009 do_interrupt(vector, 0, 0, 0, 1);
5010 break;
5011 case SVM_EVTINJ_TYPE_NMI:
5012 env->exception_index = EXCP02_NMI;
eaa728ee
FB
5013 env->error_code = event_inj_err;
5014 env->exception_is_int = 0;
5015 env->exception_next_eip = EIP;
5016 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
5017 cpu_loop_exit();
5018 break;
5019 case SVM_EVTINJ_TYPE_EXEPT:
5020 env->exception_index = vector;
5021 env->error_code = event_inj_err;
5022 env->exception_is_int = 0;
5023 env->exception_next_eip = -1;
5024 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
5025 cpu_loop_exit();
5026 break;
5027 case SVM_EVTINJ_TYPE_SOFT:
5028 env->exception_index = vector;
5029 env->error_code = event_inj_err;
5030 env->exception_is_int = 1;
5031 env->exception_next_eip = EIP;
5032 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
5033 cpu_loop_exit();
5034 break;
5035 }
5036 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
5037 }
5038}
5039
5040void helper_vmmcall(void)
5041{
5042 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5043 raise_exception(EXCP06_ILLOP);
5044}
5045
5046void helper_vmload(int aflag)
5047{
5048 target_ulong addr;
5049 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5050
5051 if (aflag == 2)
5052 addr = EAX;
5053 else
5054 addr = (uint32_t)EAX;
5055
5056 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5057 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5058 env->segs[R_FS].base);
5059
5060 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5061 env, R_FS);
5062 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5063 env, R_GS);
5064 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5065 &env->tr);
5066 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5067 &env->ldt);
5068
5069#ifdef TARGET_X86_64
5070 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5071 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5072 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5073 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5074#endif
5075 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5076 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5077 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5078 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5079}
5080
5081void helper_vmsave(int aflag)
5082{
5083 target_ulong addr;
5084 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5085
5086 if (aflag == 2)
5087 addr = EAX;
5088 else
5089 addr = (uint32_t)EAX;
5090
5091 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5092 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5093 env->segs[R_FS].base);
5094
5095 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5096 &env->segs[R_FS]);
5097 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5098 &env->segs[R_GS]);
5099 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5100 &env->tr);
5101 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5102 &env->ldt);
5103
5104#ifdef TARGET_X86_64
5105 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5106 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5107 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5108 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5109#endif
5110 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5111 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5112 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5113 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5114}
5115
5116void helper_stgi(void)
5117{
5118 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5119 env->hflags2 |= HF2_GIF_MASK;
5120}
5121
5122void helper_clgi(void)
5123{
5124 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5125 env->hflags2 &= ~HF2_GIF_MASK;
5126}
5127
5128void helper_skinit(void)
5129{
5130 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5131 /* XXX: not implemented */
5132 raise_exception(EXCP06_ILLOP);
5133}
5134
5135void helper_invlpga(int aflag)
5136{
5137 target_ulong addr;
5138 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5139
5140 if (aflag == 2)
5141 addr = EAX;
5142 else
5143 addr = (uint32_t)EAX;
5144
5145 /* XXX: could use the ASID to see if it is needed to do the
5146 flush */
5147 tlb_flush_page(env, addr);
5148}
5149
5150void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5151{
5152 if (likely(!(env->hflags & HF_SVMI_MASK)))
5153 return;
5154 switch(type) {
5155 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5156 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5157 helper_vmexit(type, param);
5158 }
5159 break;
5160 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5161 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5162 helper_vmexit(type, param);
5163 }
5164 break;
5165 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5166 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5167 helper_vmexit(type, param);
5168 }
5169 break;
5170 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5171 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5172 helper_vmexit(type, param);
5173 }
5174 break;
5175 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5176 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5177 helper_vmexit(type, param);
5178 }
5179 break;
5180 case SVM_EXIT_MSR:
5181 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5182 /* FIXME: this should be read in at vmrun (faster this way?) */
5183 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5184 uint32_t t0, t1;
5185 switch((uint32_t)ECX) {
5186 case 0 ... 0x1fff:
5187 t0 = (ECX * 2) % 8;
5188 t1 = ECX / 8;
5189 break;
5190 case 0xc0000000 ... 0xc0001fff:
5191 t0 = (8192 + ECX - 0xc0000000) * 2;
5192 t1 = (t0 / 8);
5193 t0 %= 8;
5194 break;
5195 case 0xc0010000 ... 0xc0011fff:
5196 t0 = (16384 + ECX - 0xc0010000) * 2;
5197 t1 = (t0 / 8);
5198 t0 %= 8;
5199 break;
5200 default:
5201 helper_vmexit(type, param);
5202 t0 = 0;
5203 t1 = 0;
5204 break;
5205 }
5206 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5207 helper_vmexit(type, param);
5208 }
5209 break;
5210 default:
5211 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5212 helper_vmexit(type, param);
5213 }
5214 break;
5215 }
5216}
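/* MSR permission-map offset sketch for the switch above (illustrative):
 * every MSR owns two bits (read and write intercept).  For EFER
 * (ECX = 0xc0000080) the 0xc0000000 range applies, so
 * t0 = (8192 + 0x80) * 2 = 16640 -> byte t1 = 2080, bit t0 = 0, and
 * param (0 = read, 1 = write) selects which of the two bits is tested. */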
5217
5218void helper_svm_check_io(uint32_t port, uint32_t param,
5219 uint32_t next_eip_addend)
5220{
5221 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5222 /* FIXME: this should be read in at vmrun (faster this way?) */
5223 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5224 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5225 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5226 /* next EIP */
5227 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5228 env->eip + next_eip_addend);
5229 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5230 }
5231 }
5232}
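/* IOPM lookup sketch (port assumed): for a one-byte access to port
 * 0x3f8, the size field of param gives mask = 0x01, the permission
 * word is fetched from iopm_base_pa + 0x3f8/8 = +0x7f, and bit
 * (0x3f8 & 7) = 0 of it decides whether SVM_EXIT_IOIO is taken. */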
5233
5234/* Note: currently only 32 bits of exit_code are used */
5235void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5236{
5237 uint32_t int_ctl;
5238
5239 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5240 exit_code, exit_info_1,
5241 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5242 EIP);
5243
5244 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5245 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5246 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5247 } else {
5248 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5249 }
5250
5251 /* Save the VM state in the vmcb */
5252 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5253 &env->segs[R_ES]);
5254 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5255 &env->segs[R_CS]);
5256 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5257 &env->segs[R_SS]);
5258 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5259 &env->segs[R_DS]);
5260
5261 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5262 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5263
5264 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5265 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5266
5267 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5268 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5269 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5270 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5271 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5272
5273 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5274 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5275 int_ctl |= env->v_tpr & V_TPR_MASK;
5276 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5277 int_ctl |= V_IRQ_MASK;
5278 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5279
5280 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5281 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5282 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5283 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5284 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5285 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5286 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5287
5288 /* Reload the host state from vm_hsave */
5289 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5290 env->hflags &= ~HF_SVMI_MASK;
5291 env->intercept = 0;
5292 env->intercept_exceptions = 0;
5293 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5294 env->tsc_offset = 0;
5295
5296 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5297 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5298
5299 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5300 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5301
5302 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5303 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5304 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5305 /* we need to set the efer after the crs so the hidden flags get
5306 set properly */
5307 cpu_load_efer(env,
5308 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5309 env->eflags = 0;
5310 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5311 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5312 CC_OP = CC_OP_EFLAGS;
5313
5314 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5315 env, R_ES);
5316 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5317 env, R_CS);
5318 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5319 env, R_SS);
5320 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5321 env, R_DS);
5322
5323 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5324 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5325 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5326
5327 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5328 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5329
5330 /* other setups */
5331 cpu_x86_set_cpl(env, 0);
5332 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5333 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5334
5335 env->hflags2 &= ~HF2_GIF_MASK;
5336 /* FIXME: Resets the current ASID register to zero (host ASID). */
5337
5338 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5339
5340 /* Clears the TSC_OFFSET inside the processor. */
5341
5342 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5343 from the page table indicated by the host's CR3. If the PDPEs contain
5344 illegal state, the processor causes a shutdown. */
5345
5346 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5347 env->cr[0] |= CR0_PE_MASK;
5348 env->eflags &= ~VM_MASK;
5349
5350 /* Disables all breakpoints in the host DR7 register. */
5351
5352 /* Checks the reloaded host state for consistency. */
5353
5354 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5355 host's code segment or non-canonical (in the case of long mode), a
5356 #GP fault is delivered inside the host. */
5357
5358 /* remove any pending exception */
5359 env->exception_index = -1;
5360 env->error_code = 0;
5361 env->old_exception = -1;
5362
5363 cpu_loop_exit();
5364}
5365
5366#endif
5367
5368/* MMX/SSE */
5369/* XXX: optimize by storing fptt and fptags in the static cpu state */
5370void helper_enter_mmx(void)
5371{
5372 env->fpstt = 0;
5373 *(uint32_t *)(env->fptags) = 0;
5374 *(uint32_t *)(env->fptags + 4) = 0;
5375}
5376
5377void helper_emms(void)
5378{
5379 /* set to empty state */
5380 *(uint32_t *)(env->fptags) = 0x01010101;
5381 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5382}
5383
5384/* XXX: suppress */
5385void helper_movq(void *d, void *s)
5386{
5387 *(uint64_t *)d = *(uint64_t *)s;
5388}
5389
5390#define SHIFT 0
5391#include "ops_sse.h"
5392
5393#define SHIFT 1
5394#include "ops_sse.h"
5395
5396#define SHIFT 0
5397#include "helper_template.h"
5398#undef SHIFT
5399
5400#define SHIFT 1
5401#include "helper_template.h"
5402#undef SHIFT
5403
5404#define SHIFT 2
5405#include "helper_template.h"
5406#undef SHIFT
5407
5408#ifdef TARGET_X86_64
5409
5410#define SHIFT 3
5411#include "helper_template.h"
5412#undef SHIFT
5413
5414#endif
5415
5416/* bit operations */
5417target_ulong helper_bsf(target_ulong t0)
5418{
5419 int count;
5420 target_ulong res;
5421
5422 res = t0;
5423 count = 0;
5424 while ((res & 1) == 0) {
5425 count++;
5426 res >>= 1;
5427 }
5428 return count;
5429}
5430
5431target_ulong helper_bsr(target_ulong t0)
5432{
5433 int count;
5434 target_ulong res, mask;
5435
5436 res = t0;
5437 count = TARGET_LONG_BITS - 1;
5438 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5439 while ((res & mask) == 0) {
5440 count--;
5441 res <<= 1;
5442 }
5443 return count;
5444}
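/* Quick check of the two helpers above (values assumed):
 * helper_bsf(0x18) scans up from bit 0 and returns 3, while
 * helper_bsr(0x18) scans down from the top bit and returns 4.  Both
 * loops assume t0 != 0; the translated code is expected to handle the
 * zero-source (ZF set, destination undefined) case before calling in. */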
5445
5446
5447static int compute_all_eflags(void)
5448{
5449 return CC_SRC;
5450}
5451
5452static int compute_c_eflags(void)
5453{
5454 return CC_SRC & CC_C;
5455}
5456
5457uint32_t helper_cc_compute_all(int op)
5458{
5459 switch (op) {
5460 default: /* should never happen */ return 0;
5461
5462 case CC_OP_EFLAGS: return compute_all_eflags();
5463
5464 case CC_OP_MULB: return compute_all_mulb();
5465 case CC_OP_MULW: return compute_all_mulw();
5466 case CC_OP_MULL: return compute_all_mull();
5467
5468 case CC_OP_ADDB: return compute_all_addb();
5469 case CC_OP_ADDW: return compute_all_addw();
5470 case CC_OP_ADDL: return compute_all_addl();
5471
5472 case CC_OP_ADCB: return compute_all_adcb();
5473 case CC_OP_ADCW: return compute_all_adcw();
5474 case CC_OP_ADCL: return compute_all_adcl();
5475
5476 case CC_OP_SUBB: return compute_all_subb();
5477 case CC_OP_SUBW: return compute_all_subw();
5478 case CC_OP_SUBL: return compute_all_subl();
5479
5480 case CC_OP_SBBB: return compute_all_sbbb();
5481 case CC_OP_SBBW: return compute_all_sbbw();
5482 case CC_OP_SBBL: return compute_all_sbbl();
5483
5484 case CC_OP_LOGICB: return compute_all_logicb();
5485 case CC_OP_LOGICW: return compute_all_logicw();
5486 case CC_OP_LOGICL: return compute_all_logicl();
5487
5488 case CC_OP_INCB: return compute_all_incb();
5489 case CC_OP_INCW: return compute_all_incw();
5490 case CC_OP_INCL: return compute_all_incl();
5491
5492 case CC_OP_DECB: return compute_all_decb();
5493 case CC_OP_DECW: return compute_all_decw();
5494 case CC_OP_DECL: return compute_all_decl();
5495
5496 case CC_OP_SHLB: return compute_all_shlb();
5497 case CC_OP_SHLW: return compute_all_shlw();
5498 case CC_OP_SHLL: return compute_all_shll();
5499
5500 case CC_OP_SARB: return compute_all_sarb();
5501 case CC_OP_SARW: return compute_all_sarw();
5502 case CC_OP_SARL: return compute_all_sarl();
5503
5504#ifdef TARGET_X86_64
5505 case CC_OP_MULQ: return compute_all_mulq();
5506
5507 case CC_OP_ADDQ: return compute_all_addq();
5508
5509 case CC_OP_ADCQ: return compute_all_adcq();
5510
5511 case CC_OP_SUBQ: return compute_all_subq();
5512
5513 case CC_OP_SBBQ: return compute_all_sbbq();
5514
5515 case CC_OP_LOGICQ: return compute_all_logicq();
5516
5517 case CC_OP_INCQ: return compute_all_incq();
5518
5519 case CC_OP_DECQ: return compute_all_decq();
5520
5521 case CC_OP_SHLQ: return compute_all_shlq();
5522
5523 case CC_OP_SARQ: return compute_all_sarq();
5524#endif
5525 }
5526}
5527
5528uint32_t helper_cc_compute_c(int op)
5529{
5530 switch (op) {
5531 default: /* should never happen */ return 0;
5532
5533 case CC_OP_EFLAGS: return compute_c_eflags();
5534
5535 case CC_OP_MULB: return compute_c_mull();
5536 case CC_OP_MULW: return compute_c_mull();
5537 case CC_OP_MULL: return compute_c_mull();
5538
5539 case CC_OP_ADDB: return compute_c_addb();
5540 case CC_OP_ADDW: return compute_c_addw();
5541 case CC_OP_ADDL: return compute_c_addl();
5542
5543 case CC_OP_ADCB: return compute_c_adcb();
5544 case CC_OP_ADCW: return compute_c_adcw();
5545 case CC_OP_ADCL: return compute_c_adcl();
5546
5547 case CC_OP_SUBB: return compute_c_subb();
5548 case CC_OP_SUBW: return compute_c_subw();
5549 case CC_OP_SUBL: return compute_c_subl();
5550
5551 case CC_OP_SBBB: return compute_c_sbbb();
5552 case CC_OP_SBBW: return compute_c_sbbw();
5553 case CC_OP_SBBL: return compute_c_sbbl();
5554
5555 case CC_OP_LOGICB: return compute_c_logicb();
5556 case CC_OP_LOGICW: return compute_c_logicw();
5557 case CC_OP_LOGICL: return compute_c_logicl();
5558
5559 case CC_OP_INCB: return compute_c_incl();
5560 case CC_OP_INCW: return compute_c_incl();
5561 case CC_OP_INCL: return compute_c_incl();
5562
5563 case CC_OP_DECB: return compute_c_incl();
5564 case CC_OP_DECW: return compute_c_incl();
5565 case CC_OP_DECL: return compute_c_incl();
5566
5567 case CC_OP_SHLB: return compute_c_shlb();
5568 case CC_OP_SHLW: return compute_c_shlw();
5569 case CC_OP_SHLL: return compute_c_shll();
5570
5571 case CC_OP_SARB: return compute_c_sarl();
5572 case CC_OP_SARW: return compute_c_sarl();
5573 case CC_OP_SARL: return compute_c_sarl();
5574
5575#ifdef TARGET_X86_64
5576 case CC_OP_MULQ: return compute_c_mull();
5577
5578 case CC_OP_ADDQ: return compute_c_addq();
5579
5580 case CC_OP_ADCQ: return compute_c_adcq();
5581
5582 case CC_OP_SUBQ: return compute_c_subq();
5583
5584 case CC_OP_SBBQ: return compute_c_sbbq();
5585
5586 case CC_OP_LOGICQ: return compute_c_logicq();
5587
5588 case CC_OP_INCQ: return compute_c_incl();
5589
5590 case CC_OP_DECQ: return compute_c_incl();
5591
5592 case CC_OP_SHLQ: return compute_c_shlq();
5593
5594 case CC_OP_SARQ: return compute_c_sarl();
5595#endif
5596 }
5597}