/*
 * i386 helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

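/* parity_table[b] is CC_P when byte b has an even number of set bits,
   matching the x86 definition of PF (which is computed on the low 8
   bits of the result only) */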
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table: RCL/RCR with a 16 bit operand rotate through 17
   bits (16 data bits plus CF), so the count is reduced modulo 17 */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

/* modulo 9 table: RCL/RCR with an 8 bit operand rotate through 9
   bits (8 data bits plus CF), so the count is reduced modulo 9 */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

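/* constants used by the x87 load-constant helpers (FLDZ, FLD1, FLDPI,
   FLDLG2, FLDLN2, FLDL2E and FLDL2T read their values from here) */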
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = cc_table[CC_OP].compute_all();
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is this correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
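
/* the task switch source determines the busy-bit and NT handling in
   switch_tss() below: JMP and IRET clear the busy bit of the outgoing
   TSS, JMP and CALL set it on the incoming one, and only CALL links
   back by storing the old TR selector and setting NT */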

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if it is a task gate, read and load the TSS segment it points to */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

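    /* the fixed offsets below follow the architectural TSS layout: in
       the 32 bit TSS, CR3 is at 0x1c, EIP at 0x20, EFLAGS at 0x24, the
       general purpose registers at 0x28-0x44, the segment selectors at
       0x48-0x5c, the LDT selector at 0x60 and the T bit/I/O map base at
       0x64; the 16 bit TSS packs the same state into words from 0x0e */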
    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in the 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

/* check in the TSS I/O permission bitmap if port I/O is allowed */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
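
/* on x86_64 a 32 bit stack pointer write must zero-extend into RSP,
   which is why SET_ESP special-cases the 0xffffffff mask above instead
   of doing a plain read-modify-write */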

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
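
/* these push/pop helpers update 'sp' in place; sp_mask makes 16 bit
   stacks wrap within the low 16 bits while the stack-segment base
   'ssp' is added separately */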

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif

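/* SYSRET: per the AMD64 convention encoded in the STAR MSR, the target
   CS selector comes from STAR[63:48] (plus 16 for a 64 bit return) and
   SS is always that base selector plus 8 */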
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
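
/* bit 17 of the SMM revision ID advertises SMBASE relocation support;
   helper_rsm() below only reloads SMBASE when the saved revision ID
   has this bit set */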

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */


/* division, flags are undefined */

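/* note: on x86 a quotient that overflows the destination raises the same
   #DE exception as division by zero, which is why EXCP00_DIVZ is reused
   for the overflow cases below */
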
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: missing #DE exception when base == 0 (AAM) */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = cc_table[CC_OP].compute_all();
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif
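
/* note: hardware CMPXCHG16B raises #GP when the memory operand is not
   16-byte aligned; this helper does not model that check */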
1873
1874void helper_single_step(void)
1875{
1876 env->dr[6] |= 0x4000;
1877 raise_exception(EXCP01_SSTP);
1878}
1879
1880void helper_cpuid(void)
1881{
1882 uint32_t index;
eaa728ee 1883
872929aa
FB
1884 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1885
1886 index = (uint32_t)EAX;
eaa728ee
FB
1887 /* test if maximum index reached */
1888 if (index & 0x80000000) {
1889 if (index > env->cpuid_xlevel)
1890 index = env->cpuid_level;
1891 } else {
1892 if (index > env->cpuid_level)
1893 index = env->cpuid_level;
1894 }
1895
1896 switch(index) {
1897 case 0:
1898 EAX = env->cpuid_level;
1899 EBX = env->cpuid_vendor1;
1900 EDX = env->cpuid_vendor2;
1901 ECX = env->cpuid_vendor3;
1902 break;
1903 case 1:
1904 EAX = env->cpuid_version;
1905 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1906 ECX = env->cpuid_ext_features;
1907 EDX = env->cpuid_features;
1908 break;
1909 case 2:
1910 /* cache info: needed for Pentium Pro compatibility */
1911 EAX = 1;
1912 EBX = 0;
1913 ECX = 0;
1914 EDX = 0x2c307d;
1915 break;
1916 case 0x80000000:
1917 EAX = env->cpuid_xlevel;
1918 EBX = env->cpuid_vendor1;
1919 EDX = env->cpuid_vendor2;
1920 ECX = env->cpuid_vendor3;
1921 break;
1922 case 0x80000001:
1923 EAX = env->cpuid_features;
1924 EBX = 0;
1925 ECX = env->cpuid_ext3_features;
1926 EDX = env->cpuid_ext2_features;
1927 break;
1928 case 0x80000002:
1929 case 0x80000003:
1930 case 0x80000004:
1931 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1932 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1933 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1934 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1935 break;
1936 case 0x80000005:
1937 /* cache info (L1 cache) */
1938 EAX = 0x01ff01ff;
1939 EBX = 0x01ff01ff;
1940 ECX = 0x40020140;
1941 EDX = 0x40020140;
1942 break;
1943 case 0x80000006:
1944 /* cache info (L2 cache) */
1945 EAX = 0;
1946 EBX = 0x42004200;
1947 ECX = 0x02008140;
1948 EDX = 0;
1949 break;
1950 case 0x80000008:
1951 /* virtual & phys address size in low 2 bytes. */
1952/* XXX: This value must match the one used in the MMU code. */
1953#if defined(TARGET_X86_64)
1954# if defined(USE_KQEMU)
1955 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
1956# else
1957/* XXX: The physical address space is limited to 42 bits in exec.c. */
1958 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
1959# endif
1960#else
1961# if defined(USE_KQEMU)
1962 EAX = 0x00000020; /* 32 bits physical */
1963# else
1964 EAX = 0x00000024; /* 36 bits physical */
1965# endif
1966#endif
1967 EBX = 0;
1968 ECX = 0;
1969 EDX = 0;
1970 break;
1971 case 0x8000000A:
1972 EAX = 0x00000001;
1973 EBX = 0;
1974 ECX = 0;
1975 EDX = 0;
1976 break;
1977 default:
1978 /* reserved values: zero */
1979 EAX = 0;
1980 EBX = 0;
1981 ECX = 0;
1982 EDX = 0;
1983 break;
1984 }
1985}
1986
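/* For reference, the leaf dispatch above can be exercised from user space
 * with a single instruction; a GCC-style sketch assuming an x86 host (the
 * "=b" constraint may need care under 32-bit PIC builds):
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void cpuid_query(uint32_t leaf, uint32_t r[4])
{
    __asm__ volatile("cpuid"
                     : "=a"(r[0]), "=b"(r[1]), "=c"(r[2]), "=d"(r[3])
                     : "a"(leaf));
}

int main(void)
{
    uint32_t r[4];
    char vendor[13];

    cpuid_query(0, r);              /* leaf 0: max basic leaf + vendor id */
    memcpy(vendor + 0, &r[1], 4);   /* vendor string lives in EBX,EDX,ECX */
    memcpy(vendor + 4, &r[3], 4);
    memcpy(vendor + 8, &r[2], 4);
    vendor[12] = '\0';
    printf("max basic leaf %u, vendor \"%s\"\n", r[0], vendor);
    return 0;
}
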
1987void helper_enter_level(int level, int data32, target_ulong t1)
1988{
1989 target_ulong ssp;
1990 uint32_t esp_mask, esp, ebp;
1991
1992 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1993 ssp = env->segs[R_SS].base;
1994 ebp = EBP;
1995 esp = ESP;
1996 if (data32) {
1997 /* 32 bit */
1998 esp -= 4;
1999 while (--level) {
2000 esp -= 4;
2001 ebp -= 4;
2002 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2003 }
2004 esp -= 4;
2005 stl(ssp + (esp & esp_mask), t1);
2006 } else {
2007 /* 16 bit */
2008 esp -= 2;
2009 while (--level) {
2010 esp -= 2;
2011 ebp -= 2;
2012 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2013 }
2014 esp -= 2;
2015 stw(ssp + (esp & esp_mask), t1);
2016 }
2017}
2018
2019#ifdef TARGET_X86_64
2020void helper_enter64_level(int level, int data64, target_ulong t1)
2021{
2022 target_ulong esp, ebp;
2023 ebp = EBP;
2024 esp = ESP;
2025
2026 if (data64) {
2027 /* 64 bit */
2028 esp -= 8;
2029 while (--level) {
2030 esp -= 8;
2031 ebp -= 8;
2032 stq(esp, ldq(ebp));
2033 }
2034 esp -= 8;
2035 stq(esp, t1);
2036 } else {
2037 /* 16 bit */
2038 esp -= 2;
2039 while (--level) {
2040 esp -= 2;
2041 ebp -= 2;
2042 stw(esp, lduw(ebp));
2043 }
2044 esp -= 2;
2045 stw(esp, t1);
2046 }
2047}
2048#endif
2049
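/* The loops above implement the ENTER instruction's "display": for a
 * nesting level N, the N-1 enclosing frame pointers are copied below the
 * saved EBP before the new frame pointer (t1) is pushed.  A pointer-based
 * sketch of the 32-bit case, ignoring segmentation and the stack mask:
 */
#include <stdint.h>

static uint32_t *enter_display32(uint32_t *esp, const uint32_t *ebp,
                                 int level, uint32_t t1)
{
    esp--;                  /* slot already used for the caller-pushed EBP */
    while (--level) {
        ebp--;
        *--esp = *ebp;      /* re-push each enclosing frame pointer */
    }
    *--esp = t1;            /* finally push the new frame pointer */
    return esp;
}
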
2050void helper_lldt(int selector)
2051{
2052 SegmentCache *dt;
2053 uint32_t e1, e2;
2054 int index, entry_limit;
2055 target_ulong ptr;
2056
2057 selector &= 0xffff;
2058 if ((selector & 0xfffc) == 0) {
2059 /* XXX: NULL selector case: invalid LDT */
2060 env->ldt.base = 0;
2061 env->ldt.limit = 0;
2062 } else {
2063 if (selector & 0x4)
2064 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2065 dt = &env->gdt;
2066 index = selector & ~7;
2067#ifdef TARGET_X86_64
2068 if (env->hflags & HF_LMA_MASK)
2069 entry_limit = 15;
2070 else
2071#endif
2072 entry_limit = 7;
2073 if ((index + entry_limit) > dt->limit)
2074 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2075 ptr = dt->base + index;
2076 e1 = ldl_kernel(ptr);
2077 e2 = ldl_kernel(ptr + 4);
2078 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2079 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2080 if (!(e2 & DESC_P_MASK))
2081 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2082#ifdef TARGET_X86_64
2083 if (env->hflags & HF_LMA_MASK) {
2084 uint32_t e3;
2085 e3 = ldl_kernel(ptr + 8);
2086 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2087 env->ldt.base |= (target_ulong)e3 << 32;
2088 } else
2089#endif
2090 {
2091 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2092 }
2093 }
2094 env->ldt.selector = selector;
2095}
2096
2097void helper_ltr(int selector)
2098{
2099 SegmentCache *dt;
2100 uint32_t e1, e2;
2101 int index, type, entry_limit;
2102 target_ulong ptr;
2103
2104 selector &= 0xffff;
2105 if ((selector & 0xfffc) == 0) {
2106 /* NULL selector case: invalid TR */
2107 env->tr.base = 0;
2108 env->tr.limit = 0;
2109 env->tr.flags = 0;
2110 } else {
2111 if (selector & 0x4)
2112 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2113 dt = &env->gdt;
2114 index = selector & ~7;
2115#ifdef TARGET_X86_64
2116 if (env->hflags & HF_LMA_MASK)
2117 entry_limit = 15;
2118 else
2119#endif
2120 entry_limit = 7;
2121 if ((index + entry_limit) > dt->limit)
2122 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2123 ptr = dt->base + index;
2124 e1 = ldl_kernel(ptr);
2125 e2 = ldl_kernel(ptr + 4);
2126 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2127 if ((e2 & DESC_S_MASK) ||
2128 (type != 1 && type != 9))
2129 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2130 if (!(e2 & DESC_P_MASK))
2131 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2132#ifdef TARGET_X86_64
2133 if (env->hflags & HF_LMA_MASK) {
2134 uint32_t e3, e4;
2135 e3 = ldl_kernel(ptr + 8);
2136 e4 = ldl_kernel(ptr + 12);
2137 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2138 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2139 load_seg_cache_raw_dt(&env->tr, e1, e2);
2140 env->tr.base |= (target_ulong)e3 << 32;
2141 } else
2142#endif
2143 {
2144 load_seg_cache_raw_dt(&env->tr, e1, e2);
2145 }
2146 e2 |= DESC_TSS_BUSY_MASK;
2147 stl_kernel(ptr + 4, e2);
2148 }
2149 env->tr.selector = selector;
2150}
2151
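/* Both helpers above parse the two 32-bit descriptor words (e1, e2).  For
 * reference, standalone equivalents of get_seg_base()/get_seg_limit(),
 * assuming the standard IA-32 descriptor packing:
 */
#include <stdint.h>

static uint32_t desc_base(uint32_t e1, uint32_t e2)
{
    /* base = e1[31:16] | e2[7:0] << 16 | e2[31:24] */
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static uint32_t desc_limit(uint32_t e1, uint32_t e2)
{
    uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);

    if (e2 & (1u << 23))    /* G bit set: limit counts 4 KiB pages */
        limit = (limit << 12) | 0xfff;
    return limit;
}
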
 2152/* only works in protected mode and not VM86. seg_reg must be != R_CS */
2153void helper_load_seg(int seg_reg, int selector)
2154{
2155 uint32_t e1, e2;
2156 int cpl, dpl, rpl;
2157 SegmentCache *dt;
2158 int index;
2159 target_ulong ptr;
2160
2161 selector &= 0xffff;
2162 cpl = env->hflags & HF_CPL_MASK;
2163 if ((selector & 0xfffc) == 0) {
2164 /* null selector case */
2165 if (seg_reg == R_SS
2166#ifdef TARGET_X86_64
2167 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2168#endif
2169 )
2170 raise_exception_err(EXCP0D_GPF, 0);
2171 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2172 } else {
2173
2174 if (selector & 0x4)
2175 dt = &env->ldt;
2176 else
2177 dt = &env->gdt;
2178 index = selector & ~7;
2179 if ((index + 7) > dt->limit)
2180 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2181 ptr = dt->base + index;
2182 e1 = ldl_kernel(ptr);
2183 e2 = ldl_kernel(ptr + 4);
2184
2185 if (!(e2 & DESC_S_MASK))
2186 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2187 rpl = selector & 3;
2188 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2189 if (seg_reg == R_SS) {
2190 /* must be writable segment */
2191 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2192 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2193 if (rpl != cpl || dpl != cpl)
2194 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2195 } else {
2196 /* must be readable segment */
2197 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2198 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2199
2200 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2201 /* if not conforming code, test rights */
2202 if (dpl < cpl || dpl < rpl)
2203 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2204 }
2205 }
2206
2207 if (!(e2 & DESC_P_MASK)) {
2208 if (seg_reg == R_SS)
2209 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2210 else
2211 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2212 }
2213
2214 /* set the access bit if not already set */
2215 if (!(e2 & DESC_A_MASK)) {
2216 e2 |= DESC_A_MASK;
2217 stl_kernel(ptr + 4, e2);
2218 }
2219
2220 cpu_x86_load_seg_cache(env, seg_reg, selector,
2221 get_seg_base(e1, e2),
2222 get_seg_limit(e1, e2),
2223 e2);
2224#if 0
2225 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2226 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2227#endif
2228 }
2229}
2230
2231/* protected mode jump */
2232void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2233 int next_eip_addend)
2234{
2235 int gate_cs, type;
2236 uint32_t e1, e2, cpl, dpl, rpl, limit;
2237 target_ulong next_eip;
2238
2239 if ((new_cs & 0xfffc) == 0)
2240 raise_exception_err(EXCP0D_GPF, 0);
2241 if (load_segment(&e1, &e2, new_cs) != 0)
2242 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2243 cpl = env->hflags & HF_CPL_MASK;
2244 if (e2 & DESC_S_MASK) {
2245 if (!(e2 & DESC_CS_MASK))
2246 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2247 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2248 if (e2 & DESC_C_MASK) {
2249 /* conforming code segment */
2250 if (dpl > cpl)
2251 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2252 } else {
2253 /* non conforming code segment */
2254 rpl = new_cs & 3;
2255 if (rpl > cpl)
2256 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2257 if (dpl != cpl)
2258 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2259 }
2260 if (!(e2 & DESC_P_MASK))
2261 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2262 limit = get_seg_limit(e1, e2);
2263 if (new_eip > limit &&
2264 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2265 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2266 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2267 get_seg_base(e1, e2), limit, e2);
2268 EIP = new_eip;
2269 } else {
2270 /* jump to call or task gate */
2271 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2272 rpl = new_cs & 3;
2273 cpl = env->hflags & HF_CPL_MASK;
2274 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2275 switch(type) {
2276 case 1: /* 286 TSS */
2277 case 9: /* 386 TSS */
2278 case 5: /* task gate */
2279 if (dpl < cpl || dpl < rpl)
2280 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2281 next_eip = env->eip + next_eip_addend;
2282 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2283 CC_OP = CC_OP_EFLAGS;
2284 break;
2285 case 4: /* 286 call gate */
2286 case 12: /* 386 call gate */
2287 if ((dpl < cpl) || (dpl < rpl))
2288 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2289 if (!(e2 & DESC_P_MASK))
2290 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2291 gate_cs = e1 >> 16;
2292 new_eip = (e1 & 0xffff);
2293 if (type == 12)
2294 new_eip |= (e2 & 0xffff0000);
2295 if (load_segment(&e1, &e2, gate_cs) != 0)
2296 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2297 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2298 /* must be code segment */
2299 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2300 (DESC_S_MASK | DESC_CS_MASK)))
2301 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2302 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2303 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2304 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2305 if (!(e2 & DESC_P_MASK))
2306 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2307 limit = get_seg_limit(e1, e2);
2308 if (new_eip > limit)
2309 raise_exception_err(EXCP0D_GPF, 0);
2310 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2311 get_seg_base(e1, e2), limit, e2);
2312 EIP = new_eip;
2313 break;
2314 default:
2315 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2316 break;
2317 }
2318 }
2319}
2320
2321/* real mode call */
2322void helper_lcall_real(int new_cs, target_ulong new_eip1,
2323 int shift, int next_eip)
2324{
2325 int new_eip;
2326 uint32_t esp, esp_mask;
2327 target_ulong ssp;
2328
2329 new_eip = new_eip1;
2330 esp = ESP;
2331 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2332 ssp = env->segs[R_SS].base;
2333 if (shift) {
2334 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2335 PUSHL(ssp, esp, esp_mask, next_eip);
2336 } else {
2337 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2338 PUSHW(ssp, esp, esp_mask, next_eip);
2339 }
2340
2341 SET_ESP(esp, esp_mask);
2342 env->eip = new_eip;
2343 env->segs[R_CS].selector = new_cs;
2344 env->segs[R_CS].base = (new_cs << 4);
2345}
2346
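/* In real mode a selector is just a paragraph number: the code above reloads
 * CS with base = selector << 4 and performs no checks.  The address
 * computation in isolation:
 */
#include <stdint.h>

static uint32_t real_mode_linear(uint16_t seg, uint16_t off)
{
    return ((uint32_t)seg << 4) + off;  /* e.g. F000:FFF0 -> 0xFFFF0 */
}
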
2347/* protected mode call */
2348void helper_lcall_protected(int new_cs, target_ulong new_eip,
2349 int shift, int next_eip_addend)
2350{
2351 int new_stack, i;
2352 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2353 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2354 uint32_t val, limit, old_sp_mask;
2355 target_ulong ssp, old_ssp, next_eip;
2356
2357 next_eip = env->eip + next_eip_addend;
2358#ifdef DEBUG_PCALL
2359 if (loglevel & CPU_LOG_PCALL) {
2360 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2361 new_cs, (uint32_t)new_eip, shift);
2362 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2363 }
2364#endif
2365 if ((new_cs & 0xfffc) == 0)
2366 raise_exception_err(EXCP0D_GPF, 0);
2367 if (load_segment(&e1, &e2, new_cs) != 0)
2368 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2369 cpl = env->hflags & HF_CPL_MASK;
2370#ifdef DEBUG_PCALL
2371 if (loglevel & CPU_LOG_PCALL) {
2372 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2373 }
2374#endif
2375 if (e2 & DESC_S_MASK) {
2376 if (!(e2 & DESC_CS_MASK))
2377 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2378 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2379 if (e2 & DESC_C_MASK) {
2380 /* conforming code segment */
2381 if (dpl > cpl)
2382 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2383 } else {
2384 /* non conforming code segment */
2385 rpl = new_cs & 3;
2386 if (rpl > cpl)
2387 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2388 if (dpl != cpl)
2389 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2390 }
2391 if (!(e2 & DESC_P_MASK))
2392 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2393
2394#ifdef TARGET_X86_64
2395 /* XXX: check 16/32 bit cases in long mode */
2396 if (shift == 2) {
2397 target_ulong rsp;
2398 /* 64 bit case */
2399 rsp = ESP;
2400 PUSHQ(rsp, env->segs[R_CS].selector);
2401 PUSHQ(rsp, next_eip);
2402 /* from this point, not restartable */
2403 ESP = rsp;
2404 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2405 get_seg_base(e1, e2),
2406 get_seg_limit(e1, e2), e2);
2407 EIP = new_eip;
2408 } else
2409#endif
2410 {
2411 sp = ESP;
2412 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2413 ssp = env->segs[R_SS].base;
2414 if (shift) {
2415 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2416 PUSHL(ssp, sp, sp_mask, next_eip);
2417 } else {
2418 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2419 PUSHW(ssp, sp, sp_mask, next_eip);
2420 }
2421
2422 limit = get_seg_limit(e1, e2);
2423 if (new_eip > limit)
2424 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2425 /* from this point, not restartable */
2426 SET_ESP(sp, sp_mask);
2427 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2428 get_seg_base(e1, e2), limit, e2);
2429 EIP = new_eip;
2430 }
2431 } else {
2432 /* check gate type */
2433 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2434 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2435 rpl = new_cs & 3;
2436 switch(type) {
2437 case 1: /* available 286 TSS */
2438 case 9: /* available 386 TSS */
2439 case 5: /* task gate */
2440 if (dpl < cpl || dpl < rpl)
2441 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2442 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2443 CC_OP = CC_OP_EFLAGS;
2444 return;
2445 case 4: /* 286 call gate */
2446 case 12: /* 386 call gate */
2447 break;
2448 default:
2449 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2450 break;
2451 }
2452 shift = type >> 3;
2453
2454 if (dpl < cpl || dpl < rpl)
2455 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2456 /* check valid bit */
2457 if (!(e2 & DESC_P_MASK))
2458 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2459 selector = e1 >> 16;
2460 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2461 param_count = e2 & 0x1f;
2462 if ((selector & 0xfffc) == 0)
2463 raise_exception_err(EXCP0D_GPF, 0);
2464
2465 if (load_segment(&e1, &e2, selector) != 0)
2466 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2467 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2468 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2469 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2470 if (dpl > cpl)
2471 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2472 if (!(e2 & DESC_P_MASK))
2473 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2474
2475 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2476 /* to inner privilege */
2477 get_ss_esp_from_tss(&ss, &sp, dpl);
2478#ifdef DEBUG_PCALL
2479 if (loglevel & CPU_LOG_PCALL)
2480 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2481 ss, sp, param_count, ESP);
2482#endif
2483 if ((ss & 0xfffc) == 0)
2484 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2485 if ((ss & 3) != dpl)
2486 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2487 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2488 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2489 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2490 if (ss_dpl != dpl)
2491 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2492 if (!(ss_e2 & DESC_S_MASK) ||
2493 (ss_e2 & DESC_CS_MASK) ||
2494 !(ss_e2 & DESC_W_MASK))
2495 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2496 if (!(ss_e2 & DESC_P_MASK))
2497 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2498
2499 // push_size = ((param_count * 2) + 8) << shift;
2500
2501 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2502 old_ssp = env->segs[R_SS].base;
2503
2504 sp_mask = get_sp_mask(ss_e2);
2505 ssp = get_seg_base(ss_e1, ss_e2);
2506 if (shift) {
2507 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2508 PUSHL(ssp, sp, sp_mask, ESP);
2509 for(i = param_count - 1; i >= 0; i--) {
2510 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2511 PUSHL(ssp, sp, sp_mask, val);
2512 }
2513 } else {
2514 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2515 PUSHW(ssp, sp, sp_mask, ESP);
2516 for(i = param_count - 1; i >= 0; i--) {
2517 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2518 PUSHW(ssp, sp, sp_mask, val);
2519 }
2520 }
2521 new_stack = 1;
2522 } else {
2523 /* to same privilege */
2524 sp = ESP;
2525 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2526 ssp = env->segs[R_SS].base;
2527 // push_size = (4 << shift);
2528 new_stack = 0;
2529 }
2530
2531 if (shift) {
2532 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2533 PUSHL(ssp, sp, sp_mask, next_eip);
2534 } else {
2535 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2536 PUSHW(ssp, sp, sp_mask, next_eip);
2537 }
2538
2539 /* from this point, not restartable */
2540
2541 if (new_stack) {
2542 ss = (ss & ~3) | dpl;
2543 cpu_x86_load_seg_cache(env, R_SS, ss,
2544 ssp,
2545 get_seg_limit(ss_e1, ss_e2),
2546 ss_e2);
2547 }
2548
2549 selector = (selector & ~3) | dpl;
2550 cpu_x86_load_seg_cache(env, R_CS, selector,
2551 get_seg_base(e1, e2),
2552 get_seg_limit(e1, e2),
2553 e2);
2554 cpu_x86_set_cpl(env, dpl);
2555 SET_ESP(sp, sp_mask);
2556 EIP = offset;
2557 }
2558#ifdef USE_KQEMU
2559 if (kqemu_is_ok(env)) {
2560 env->exception_index = -1;
2561 cpu_loop_exit();
2562 }
2563#endif
2564}
2565
2566/* real and vm86 mode iret */
2567void helper_iret_real(int shift)
2568{
2569 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2570 target_ulong ssp;
2571 int eflags_mask;
2572
 2573 sp_mask = 0xffff; /* XXX: use SS segment size? */
2574 sp = ESP;
2575 ssp = env->segs[R_SS].base;
2576 if (shift == 1) {
2577 /* 32 bits */
2578 POPL(ssp, sp, sp_mask, new_eip);
2579 POPL(ssp, sp, sp_mask, new_cs);
2580 new_cs &= 0xffff;
2581 POPL(ssp, sp, sp_mask, new_eflags);
2582 } else {
2583 /* 16 bits */
2584 POPW(ssp, sp, sp_mask, new_eip);
2585 POPW(ssp, sp, sp_mask, new_cs);
2586 POPW(ssp, sp, sp_mask, new_eflags);
2587 }
2588 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2589 load_seg_vm(R_CS, new_cs);
2590 env->eip = new_eip;
2591 if (env->eflags & VM_MASK)
2592 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2593 else
2594 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2595 if (shift == 0)
2596 eflags_mask &= 0xffff;
2597 load_eflags(new_eflags, eflags_mask);
2598 env->hflags &= ~HF_NMI_MASK;
2599}
2600
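/* load_eflags() only touches the bits selected by eflags_mask, which is why
 * the mask above depends on VM86 mode and the operand size.  The underlying
 * bit update, as a standalone sketch:
 */
static unsigned int merge_eflags(unsigned int old, unsigned int val,
                                 unsigned int mask)
{
    return (old & ~mask) | (val & mask);
}
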
2601static inline void validate_seg(int seg_reg, int cpl)
2602{
2603 int dpl;
2604 uint32_t e2;
2605
2606 /* XXX: on x86_64, we do not want to nullify FS and GS because
2607 they may still contain a valid base. I would be interested to
2608 know how a real x86_64 CPU behaves */
2609 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2610 (env->segs[seg_reg].selector & 0xfffc) == 0)
2611 return;
2612
2613 e2 = env->segs[seg_reg].flags;
2614 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2615 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2616 /* data or non conforming code segment */
2617 if (dpl < cpl) {
2618 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2619 }
2620 }
2621}
2622
2623/* protected mode iret */
2624static inline void helper_ret_protected(int shift, int is_iret, int addend)
2625{
2626 uint32_t new_cs, new_eflags, new_ss;
2627 uint32_t new_es, new_ds, new_fs, new_gs;
2628 uint32_t e1, e2, ss_e1, ss_e2;
2629 int cpl, dpl, rpl, eflags_mask, iopl;
2630 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2631
2632#ifdef TARGET_X86_64
2633 if (shift == 2)
2634 sp_mask = -1;
2635 else
2636#endif
2637 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2638 sp = ESP;
2639 ssp = env->segs[R_SS].base;
2640 new_eflags = 0; /* avoid warning */
2641#ifdef TARGET_X86_64
2642 if (shift == 2) {
2643 POPQ(sp, new_eip);
2644 POPQ(sp, new_cs);
2645 new_cs &= 0xffff;
2646 if (is_iret) {
2647 POPQ(sp, new_eflags);
2648 }
2649 } else
2650#endif
2651 if (shift == 1) {
2652 /* 32 bits */
2653 POPL(ssp, sp, sp_mask, new_eip);
2654 POPL(ssp, sp, sp_mask, new_cs);
2655 new_cs &= 0xffff;
2656 if (is_iret) {
2657 POPL(ssp, sp, sp_mask, new_eflags);
2658 if (new_eflags & VM_MASK)
2659 goto return_to_vm86;
2660 }
2661 } else {
2662 /* 16 bits */
2663 POPW(ssp, sp, sp_mask, new_eip);
2664 POPW(ssp, sp, sp_mask, new_cs);
2665 if (is_iret)
2666 POPW(ssp, sp, sp_mask, new_eflags);
2667 }
2668#ifdef DEBUG_PCALL
2669 if (loglevel & CPU_LOG_PCALL) {
2670 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2671 new_cs, new_eip, shift, addend);
2672 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2673 }
2674#endif
2675 if ((new_cs & 0xfffc) == 0)
2676 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2677 if (load_segment(&e1, &e2, new_cs) != 0)
2678 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2679 if (!(e2 & DESC_S_MASK) ||
2680 !(e2 & DESC_CS_MASK))
2681 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2682 cpl = env->hflags & HF_CPL_MASK;
2683 rpl = new_cs & 3;
2684 if (rpl < cpl)
2685 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2686 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2687 if (e2 & DESC_C_MASK) {
2688 if (dpl > rpl)
2689 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2690 } else {
2691 if (dpl != rpl)
2692 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2693 }
2694 if (!(e2 & DESC_P_MASK))
2695 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2696
2697 sp += addend;
2698 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2699 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
 2700 /* return to same privilege level */
2701 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2702 get_seg_base(e1, e2),
2703 get_seg_limit(e1, e2),
2704 e2);
2705 } else {
2706 /* return to different privilege level */
2707#ifdef TARGET_X86_64
2708 if (shift == 2) {
2709 POPQ(sp, new_esp);
2710 POPQ(sp, new_ss);
2711 new_ss &= 0xffff;
2712 } else
2713#endif
2714 if (shift == 1) {
2715 /* 32 bits */
2716 POPL(ssp, sp, sp_mask, new_esp);
2717 POPL(ssp, sp, sp_mask, new_ss);
2718 new_ss &= 0xffff;
2719 } else {
2720 /* 16 bits */
2721 POPW(ssp, sp, sp_mask, new_esp);
2722 POPW(ssp, sp, sp_mask, new_ss);
2723 }
2724#ifdef DEBUG_PCALL
2725 if (loglevel & CPU_LOG_PCALL) {
2726 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2727 new_ss, new_esp);
2728 }
2729#endif
2730 if ((new_ss & 0xfffc) == 0) {
2731#ifdef TARGET_X86_64
 2732 /* NULL ss is allowed in long mode if cpl != 3 */
2733 /* XXX: test CS64 ? */
2734 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2735 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2736 0, 0xffffffff,
2737 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2738 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2739 DESC_W_MASK | DESC_A_MASK);
2740 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2741 } else
2742#endif
2743 {
2744 raise_exception_err(EXCP0D_GPF, 0);
2745 }
2746 } else {
2747 if ((new_ss & 3) != rpl)
2748 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2749 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2750 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2751 if (!(ss_e2 & DESC_S_MASK) ||
2752 (ss_e2 & DESC_CS_MASK) ||
2753 !(ss_e2 & DESC_W_MASK))
2754 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2755 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2756 if (dpl != rpl)
2757 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2758 if (!(ss_e2 & DESC_P_MASK))
2759 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2760 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2761 get_seg_base(ss_e1, ss_e2),
2762 get_seg_limit(ss_e1, ss_e2),
2763 ss_e2);
2764 }
2765
2766 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2767 get_seg_base(e1, e2),
2768 get_seg_limit(e1, e2),
2769 e2);
2770 cpu_x86_set_cpl(env, rpl);
2771 sp = new_esp;
2772#ifdef TARGET_X86_64
2773 if (env->hflags & HF_CS64_MASK)
2774 sp_mask = -1;
2775 else
2776#endif
2777 sp_mask = get_sp_mask(ss_e2);
2778
2779 /* validate data segments */
2780 validate_seg(R_ES, rpl);
2781 validate_seg(R_DS, rpl);
2782 validate_seg(R_FS, rpl);
2783 validate_seg(R_GS, rpl);
2784
2785 sp += addend;
2786 }
2787 SET_ESP(sp, sp_mask);
2788 env->eip = new_eip;
2789 if (is_iret) {
2790 /* NOTE: 'cpl' is the _old_ CPL */
2791 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2792 if (cpl == 0)
2793 eflags_mask |= IOPL_MASK;
2794 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2795 if (cpl <= iopl)
2796 eflags_mask |= IF_MASK;
2797 if (shift == 0)
2798 eflags_mask &= 0xffff;
2799 load_eflags(new_eflags, eflags_mask);
2800 }
2801 return;
2802
2803 return_to_vm86:
2804 POPL(ssp, sp, sp_mask, new_esp);
2805 POPL(ssp, sp, sp_mask, new_ss);
2806 POPL(ssp, sp, sp_mask, new_es);
2807 POPL(ssp, sp, sp_mask, new_ds);
2808 POPL(ssp, sp, sp_mask, new_fs);
2809 POPL(ssp, sp, sp_mask, new_gs);
2810
2811 /* modify processor state */
2812 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2813 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2814 load_seg_vm(R_CS, new_cs & 0xffff);
2815 cpu_x86_set_cpl(env, 3);
2816 load_seg_vm(R_SS, new_ss & 0xffff);
2817 load_seg_vm(R_ES, new_es & 0xffff);
2818 load_seg_vm(R_DS, new_ds & 0xffff);
2819 load_seg_vm(R_FS, new_fs & 0xffff);
2820 load_seg_vm(R_GS, new_gs & 0xffff);
2821
2822 env->eip = new_eip & 0xffff;
2823 ESP = new_esp;
2824}
2825
2826void helper_iret_protected(int shift, int next_eip)
2827{
2828 int tss_selector, type;
2829 uint32_t e1, e2;
2830
2831 /* specific case for TSS */
2832 if (env->eflags & NT_MASK) {
2833#ifdef TARGET_X86_64
2834 if (env->hflags & HF_LMA_MASK)
2835 raise_exception_err(EXCP0D_GPF, 0);
2836#endif
2837 tss_selector = lduw_kernel(env->tr.base + 0);
2838 if (tss_selector & 4)
2839 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2840 if (load_segment(&e1, &e2, tss_selector) != 0)
2841 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2842 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
 2843 /* NOTE: this checks the S bit and the busy-TSS type (286 or 386) at once */
2844 if (type != 3)
2845 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2846 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2847 } else {
2848 helper_ret_protected(shift, 1, 0);
2849 }
2850 env->hflags &= ~HF_NMI_MASK;
2851#ifdef USE_KQEMU
2852 if (kqemu_is_ok(env)) {
2853 CC_OP = CC_OP_EFLAGS;
2854 env->exception_index = -1;
2855 cpu_loop_exit();
2856 }
2857#endif
2858}
2859
2860void helper_lret_protected(int shift, int addend)
2861{
2862 helper_ret_protected(shift, 0, addend);
2863#ifdef USE_KQEMU
2864 if (kqemu_is_ok(env)) {
2865 env->exception_index = -1;
2866 cpu_loop_exit();
2867 }
2868#endif
2869}
2870
2871void helper_sysenter(void)
2872{
2873 if (env->sysenter_cs == 0) {
2874 raise_exception_err(EXCP0D_GPF, 0);
2875 }
2876 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2877 cpu_x86_set_cpl(env, 0);
2878 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2879 0, 0xffffffff,
2880 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2881 DESC_S_MASK |
2882 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2883 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2884 0, 0xffffffff,
2885 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2886 DESC_S_MASK |
2887 DESC_W_MASK | DESC_A_MASK);
2888 ESP = env->sysenter_esp;
2889 EIP = env->sysenter_eip;
2890}
2891
2892void helper_sysexit(void)
2893{
2894 int cpl;
2895
2896 cpl = env->hflags & HF_CPL_MASK;
2897 if (env->sysenter_cs == 0 || cpl != 0) {
2898 raise_exception_err(EXCP0D_GPF, 0);
2899 }
2900 cpu_x86_set_cpl(env, 3);
2901 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2902 0, 0xffffffff,
2903 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2904 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2905 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2906 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2907 0, 0xffffffff,
2908 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2909 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2910 DESC_W_MASK | DESC_A_MASK);
2911 ESP = ECX;
2912 EIP = EDX;
2913#ifdef USE_KQEMU
2914 if (kqemu_is_ok(env)) {
2915 env->exception_index = -1;
2916 cpu_loop_exit();
2917 }
2918#endif
2919}
2920
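/* SYSENTER/SYSEXIT derive all four flat selectors from the single
 * SYSENTER_CS MSR; the fixed +8/+16/+24 offsets used above, tabulated in
 * one place:
 */
#include <stdint.h>

static void sysenter_selectors(uint16_t sysenter_cs,
                               uint16_t *kernel_cs, uint16_t *kernel_ss,
                               uint16_t *user_cs, uint16_t *user_ss)
{
    *kernel_cs = sysenter_cs & 0xfffc;               /* ring-0 code */
    *kernel_ss = (sysenter_cs + 8) & 0xfffc;         /* ring-0 stack */
    *user_cs   = ((sysenter_cs + 16) & 0xfffc) | 3;  /* ring-3 code, RPL 3 */
    *user_ss   = ((sysenter_cs + 24) & 0xfffc) | 3;  /* ring-3 stack, RPL 3 */
}
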
2921#if defined(CONFIG_USER_ONLY)
2922target_ulong helper_read_crN(int reg)
 2923{
2924 return 0;
2925}
2926
2927void helper_write_crN(int reg, target_ulong t0)
2928{
2929}
2930#else
2931target_ulong helper_read_crN(int reg)
2932{
2933 target_ulong val;
2934
2935 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2936 switch(reg) {
2937 default:
2938 val = env->cr[reg];
2939 break;
2940 case 8:
2941 val = cpu_get_apic_tpr(env);
2942 break;
2943 }
2944 return val;
2945}
2946
2947void helper_write_crN(int reg, target_ulong t0)
2948{
2949 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2950 switch(reg) {
2951 case 0:
2952 cpu_x86_update_cr0(env, t0);
2953 break;
2954 case 3:
2955 cpu_x86_update_cr3(env, t0);
2956 break;
2957 case 4:
2958 cpu_x86_update_cr4(env, t0);
2959 break;
2960 case 8:
2961 cpu_set_apic_tpr(env, t0);
2962 env->cr[8] = t0;
2963 break;
2964 default:
2965 env->cr[reg] = t0;
2966 break;
2967 }
 2968}
 2969#endif
2970
2971void helper_lmsw(target_ulong t0)
2972{
2973 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2974 if already set to one. */
2975 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
 2976 helper_write_crN(0, t0);
2977}
2978
2979void helper_clts(void)
2980{
2981 env->cr[0] &= ~CR0_TS_MASK;
2982 env->hflags &= ~HF_TS_MASK;
2983}
2984
2985#if !defined(CONFIG_USER_ONLY)
2986target_ulong helper_movtl_T0_cr8(void)
2987{
2988 return cpu_get_apic_tpr(env);
2989}
2990#endif
2991
2992/* XXX: do more */
2993void helper_movl_drN_T0(int reg, target_ulong t0)
2994{
2995 env->dr[reg] = t0;
2996}
2997
2998void helper_invlpg(target_ulong addr)
2999{
 3000 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3001 cpu_x86_flush_tlb(env, addr);
3002}
3003
3004void helper_rdtsc(void)
3005{
3006 uint64_t val;
3007
3008 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3009 raise_exception(EXCP0D_GPF);
3010 }
3011 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3012
3013 val = cpu_get_tsc(env);
3014 EAX = (uint32_t)(val);
3015 EDX = (uint32_t)(val >> 32);
3016}
3017
3018void helper_rdpmc(void)
3019{
3020 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3021 raise_exception(EXCP0D_GPF);
3022 }
3023 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3024
3025 /* currently unimplemented */
3026 raise_exception_err(EXCP06_ILLOP, 0);
3027}
3028
3029#if defined(CONFIG_USER_ONLY)
3030void helper_wrmsr(void)
3031{
3032}
3033
3034void helper_rdmsr(void)
3035{
3036}
3037#else
3038void helper_wrmsr(void)
3039{
3040 uint64_t val;
3041
3042 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3043
3044 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3045
3046 switch((uint32_t)ECX) {
3047 case MSR_IA32_SYSENTER_CS:
3048 env->sysenter_cs = val & 0xffff;
3049 break;
3050 case MSR_IA32_SYSENTER_ESP:
3051 env->sysenter_esp = val;
3052 break;
3053 case MSR_IA32_SYSENTER_EIP:
3054 env->sysenter_eip = val;
3055 break;
3056 case MSR_IA32_APICBASE:
3057 cpu_set_apic_base(env, val);
3058 break;
3059 case MSR_EFER:
3060 {
3061 uint64_t update_mask;
3062 update_mask = 0;
3063 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3064 update_mask |= MSR_EFER_SCE;
3065 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3066 update_mask |= MSR_EFER_LME;
3067 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3068 update_mask |= MSR_EFER_FFXSR;
3069 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3070 update_mask |= MSR_EFER_NXE;
3071 env->efer = (env->efer & ~update_mask) |
3072 (val & update_mask);
3073 }
3074 break;
3075 case MSR_STAR:
3076 env->star = val;
3077 break;
3078 case MSR_PAT:
3079 env->pat = val;
3080 break;
3081 case MSR_VM_HSAVE_PA:
3082 env->vm_hsave = val;
3083 break;
3084#ifdef TARGET_X86_64
3085 case MSR_LSTAR:
3086 env->lstar = val;
3087 break;
3088 case MSR_CSTAR:
3089 env->cstar = val;
3090 break;
3091 case MSR_FMASK:
3092 env->fmask = val;
3093 break;
3094 case MSR_FSBASE:
3095 env->segs[R_FS].base = val;
3096 break;
3097 case MSR_GSBASE:
3098 env->segs[R_GS].base = val;
3099 break;
3100 case MSR_KERNELGSBASE:
3101 env->kernelgsbase = val;
3102 break;
3103#endif
3104 default:
3105 /* XXX: exception ? */
3106 break;
3107 }
3108}
3109
3110void helper_rdmsr(void)
3111{
3112 uint64_t val;
3113
3114 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3115
3116 switch((uint32_t)ECX) {
3117 case MSR_IA32_SYSENTER_CS:
3118 val = env->sysenter_cs;
3119 break;
3120 case MSR_IA32_SYSENTER_ESP:
3121 val = env->sysenter_esp;
3122 break;
3123 case MSR_IA32_SYSENTER_EIP:
3124 val = env->sysenter_eip;
3125 break;
3126 case MSR_IA32_APICBASE:
3127 val = cpu_get_apic_base(env);
3128 break;
3129 case MSR_EFER:
3130 val = env->efer;
3131 break;
3132 case MSR_STAR:
3133 val = env->star;
3134 break;
3135 case MSR_PAT:
3136 val = env->pat;
3137 break;
3138 case MSR_VM_HSAVE_PA:
3139 val = env->vm_hsave;
3140 break;
3141#ifdef TARGET_X86_64
3142 case MSR_LSTAR:
3143 val = env->lstar;
3144 break;
3145 case MSR_CSTAR:
3146 val = env->cstar;
3147 break;
3148 case MSR_FMASK:
3149 val = env->fmask;
3150 break;
3151 case MSR_FSBASE:
3152 val = env->segs[R_FS].base;
3153 break;
3154 case MSR_GSBASE:
3155 val = env->segs[R_GS].base;
3156 break;
3157 case MSR_KERNELGSBASE:
3158 val = env->kernelgsbase;
3159 break;
3160#endif
3161 default:
3162 /* XXX: exception ? */
3163 val = 0;
3164 break;
3165 }
3166 EAX = (uint32_t)(val);
3167 EDX = (uint32_t)(val >> 32);
3168}
3169#endif
3170
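/* Both MSR helpers split the 64-bit value across the EDX:EAX pair, exactly
 * as RDMSR/WRMSR do architecturally.  Standalone equivalents of the two
 * expressions used above:
 */
#include <stdint.h>

static uint64_t msr_pack(uint32_t eax, uint32_t edx)
{
    return ((uint64_t)edx << 32) | eax;
}

static void msr_unpack(uint64_t val, uint32_t *eax, uint32_t *edx)
{
    *eax = (uint32_t)val;
    *edx = (uint32_t)(val >> 32);
}
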
3171target_ulong helper_lsl(target_ulong selector1)
3172{
3173 unsigned int limit;
3174 uint32_t e1, e2, eflags, selector;
3175 int rpl, dpl, cpl, type;
3176
3177 selector = selector1 & 0xffff;
3178 eflags = cc_table[CC_OP].compute_all();
3179 if (load_segment(&e1, &e2, selector) != 0)
3180 goto fail;
3181 rpl = selector & 3;
3182 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3183 cpl = env->hflags & HF_CPL_MASK;
3184 if (e2 & DESC_S_MASK) {
3185 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3186 /* conforming */
3187 } else {
3188 if (dpl < cpl || dpl < rpl)
3189 goto fail;
3190 }
3191 } else {
3192 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3193 switch(type) {
3194 case 1:
3195 case 2:
3196 case 3:
3197 case 9:
3198 case 11:
3199 break;
3200 default:
3201 goto fail;
3202 }
3203 if (dpl < cpl || dpl < rpl) {
3204 fail:
3205 CC_SRC = eflags & ~CC_Z;
3206 return 0;
3207 }
3208 }
3209 limit = get_seg_limit(e1, e2);
3210 CC_SRC = eflags | CC_Z;
3211 return limit;
3212}
3213
3214target_ulong helper_lar(target_ulong selector1)
3215{
3216 uint32_t e1, e2, eflags, selector;
3217 int rpl, dpl, cpl, type;
3218
3219 selector = selector1 & 0xffff;
3220 eflags = cc_table[CC_OP].compute_all();
3221 if ((selector & 0xfffc) == 0)
3222 goto fail;
3223 if (load_segment(&e1, &e2, selector) != 0)
3224 goto fail;
3225 rpl = selector & 3;
3226 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3227 cpl = env->hflags & HF_CPL_MASK;
3228 if (e2 & DESC_S_MASK) {
3229 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3230 /* conforming */
3231 } else {
3232 if (dpl < cpl || dpl < rpl)
3233 goto fail;
3234 }
3235 } else {
3236 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3237 switch(type) {
3238 case 1:
3239 case 2:
3240 case 3:
3241 case 4:
3242 case 5:
3243 case 9:
3244 case 11:
3245 case 12:
3246 break;
3247 default:
3248 goto fail;
3249 }
3250 if (dpl < cpl || dpl < rpl) {
3251 fail:
3252 CC_SRC = eflags & ~CC_Z;
3253 return 0;
3254 }
3255 }
3256 CC_SRC = eflags | CC_Z;
3257 return e2 & 0x00f0ff00;
3258}
3259
3260void helper_verr(target_ulong selector1)
3261{
3262 uint32_t e1, e2, eflags, selector;
3263 int rpl, dpl, cpl;
3264
3265 selector = selector1 & 0xffff;
3266 eflags = cc_table[CC_OP].compute_all();
3267 if ((selector & 0xfffc) == 0)
3268 goto fail;
3269 if (load_segment(&e1, &e2, selector) != 0)
3270 goto fail;
3271 if (!(e2 & DESC_S_MASK))
3272 goto fail;
3273 rpl = selector & 3;
3274 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3275 cpl = env->hflags & HF_CPL_MASK;
3276 if (e2 & DESC_CS_MASK) {
3277 if (!(e2 & DESC_R_MASK))
3278 goto fail;
3279 if (!(e2 & DESC_C_MASK)) {
3280 if (dpl < cpl || dpl < rpl)
3281 goto fail;
3282 }
3283 } else {
3284 if (dpl < cpl || dpl < rpl) {
3285 fail:
3286 CC_SRC = eflags & ~CC_Z;
3287 return;
3288 }
3289 }
3290 CC_SRC = eflags | CC_Z;
3291}
3292
3293void helper_verw(target_ulong selector1)
3294{
3295 uint32_t e1, e2, eflags, selector;
3296 int rpl, dpl, cpl;
3297
3298 selector = selector1 & 0xffff;
3299 eflags = cc_table[CC_OP].compute_all();
3300 if ((selector & 0xfffc) == 0)
3301 goto fail;
3302 if (load_segment(&e1, &e2, selector) != 0)
3303 goto fail;
3304 if (!(e2 & DESC_S_MASK))
3305 goto fail;
3306 rpl = selector & 3;
3307 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3308 cpl = env->hflags & HF_CPL_MASK;
3309 if (e2 & DESC_CS_MASK) {
3310 goto fail;
3311 } else {
3312 if (dpl < cpl || dpl < rpl)
3313 goto fail;
3314 if (!(e2 & DESC_W_MASK)) {
3315 fail:
3316 CC_SRC = eflags & ~CC_Z;
3317 return;
3318 }
3319 }
3320 CC_SRC = eflags | CC_Z;
3321}
3322
3323/* x87 FPU helpers */
3324
3325static void fpu_set_exception(int mask)
3326{
3327 env->fpus |= mask;
3328 if (env->fpus & (~env->fpuc & FPUC_EM))
3329 env->fpus |= FPUS_SE | FPUS_B;
3330}
3331
3332static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3333{
3334 if (b == 0.0)
3335 fpu_set_exception(FPUS_ZE);
3336 return a / b;
3337}
3338
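/* fpu_set_exception() raises the summary and busy bits only when the new
 * status bit is unmasked in the control word.  A condensed model of the
 * mask test, assuming FPUC_EM covers the six low mask bits (0x3f):
 */
static int fpu_exception_pending(unsigned int fpus, unsigned int fpuc)
{
    return (fpus & (~fpuc & 0x3f)) != 0;
}
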
3339void fpu_raise_exception(void)
3340{
3341 if (env->cr[0] & CR0_NE_MASK) {
3342 raise_exception(EXCP10_COPR);
3343 }
3344#if !defined(CONFIG_USER_ONLY)
3345 else {
3346 cpu_set_ferr(env);
3347 }
3348#endif
3349}
3350
3351void helper_flds_FT0(uint32_t val)
3352{
3353 union {
3354 float32 f;
3355 uint32_t i;
3356 } u;
3357 u.i = val;
3358 FT0 = float32_to_floatx(u.f, &env->fp_status);
3359}
3360
3361void helper_fldl_FT0(uint64_t val)
3362{
3363 union {
3364 float64 f;
3365 uint64_t i;
3366 } u;
3367 u.i = val;
3368 FT0 = float64_to_floatx(u.f, &env->fp_status);
3369}
3370
3371void helper_fildl_FT0(int32_t val)
3372{
3373 FT0 = int32_to_floatx(val, &env->fp_status);
3374}
3375
3376void helper_flds_ST0(uint32_t val)
3377{
3378 int new_fpstt;
3379 union {
3380 float32 f;
3381 uint32_t i;
3382 } u;
3383 new_fpstt = (env->fpstt - 1) & 7;
3384 u.i = val;
3385 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3386 env->fpstt = new_fpstt;
3387 env->fptags[new_fpstt] = 0; /* validate stack entry */
3388}
3389
3390void helper_fldl_ST0(uint64_t val)
3391{
3392 int new_fpstt;
3393 union {
3394 float64 f;
3395 uint64_t i;
3396 } u;
3397 new_fpstt = (env->fpstt - 1) & 7;
3398 u.i = val;
3399 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3400 env->fpstt = new_fpstt;
3401 env->fptags[new_fpstt] = 0; /* validate stack entry */
3402}
3403
3404void helper_fildl_ST0(int32_t val)
3405{
3406 int new_fpstt;
3407 new_fpstt = (env->fpstt - 1) & 7;
3408 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3409 env->fpstt = new_fpstt;
3410 env->fptags[new_fpstt] = 0; /* validate stack entry */
3411}
3412
3413void helper_fildll_ST0(int64_t val)
3414{
3415 int new_fpstt;
3416 new_fpstt = (env->fpstt - 1) & 7;
3417 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3418 env->fpstt = new_fpstt;
3419 env->fptags[new_fpstt] = 0; /* validate stack entry */
3420}
3421
3422uint32_t helper_fsts_ST0(void)
3423{
3424 union {
3425 float32 f;
3426 uint32_t i;
3427 } u;
3428 u.f = floatx_to_float32(ST0, &env->fp_status);
3429 return u.i;
3430}
3431
3432uint64_t helper_fstl_ST0(void)
3433{
3434 union {
3435 float64 f;
3436 uint64_t i;
3437 } u;
3438 u.f = floatx_to_float64(ST0, &env->fp_status);
3439 return u.i;
3440}
3441
3442int32_t helper_fist_ST0(void)
3443{
3444 int32_t val;
3445 val = floatx_to_int32(ST0, &env->fp_status);
3446 if (val != (int16_t)val)
3447 val = -32768;
3448 return val;
3449}
3450
3451int32_t helper_fistl_ST0(void)
3452{
3453 int32_t val;
3454 val = floatx_to_int32(ST0, &env->fp_status);
3455 return val;
3456}
3457
3458int64_t helper_fistll_ST0(void)
3459{
3460 int64_t val;
3461 val = floatx_to_int64(ST0, &env->fp_status);
3462 return val;
3463}
3464
3465int32_t helper_fistt_ST0(void)
3466{
3467 int32_t val;
3468 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3469 if (val != (int16_t)val)
3470 val = -32768;
3471 return val;
3472}
3473
3474int32_t helper_fisttl_ST0(void)
3475{
3476 int32_t val;
3477 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3478 return val;
3479}
3480
3481int64_t helper_fisttll_ST0(void)
3482{
3483 int64_t val;
3484 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3485 return val;
3486}
3487
3488void helper_fldt_ST0(target_ulong ptr)
3489{
3490 int new_fpstt;
3491 new_fpstt = (env->fpstt - 1) & 7;
3492 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3493 env->fpstt = new_fpstt;
3494 env->fptags[new_fpstt] = 0; /* validate stack entry */
3495}
3496
3497void helper_fstt_ST0(target_ulong ptr)
3498{
3499 helper_fstt(ST0, ptr);
3500}
3501
3502void helper_fpush(void)
3503{
3504 fpush();
3505}
3506
3507void helper_fpop(void)
3508{
3509 fpop();
3510}
3511
3512void helper_fdecstp(void)
3513{
3514 env->fpstt = (env->fpstt - 1) & 7;
3515 env->fpus &= (~0x4700);
3516}
3517
3518void helper_fincstp(void)
3519{
3520 env->fpstt = (env->fpstt + 1) & 7;
3521 env->fpus &= (~0x4700);
3522}
3523
3524/* FPU move */
3525
3526void helper_ffree_STN(int st_index)
3527{
3528 env->fptags[(env->fpstt + st_index) & 7] = 1;
3529}
3530
3531void helper_fmov_ST0_FT0(void)
3532{
3533 ST0 = FT0;
3534}
3535
3536void helper_fmov_FT0_STN(int st_index)
3537{
3538 FT0 = ST(st_index);
3539}
3540
3541void helper_fmov_ST0_STN(int st_index)
3542{
3543 ST0 = ST(st_index);
3544}
3545
3546void helper_fmov_STN_ST0(int st_index)
3547{
3548 ST(st_index) = ST0;
3549}
3550
3551void helper_fxchg_ST0_STN(int st_index)
3552{
3553 CPU86_LDouble tmp;
3554 tmp = ST(st_index);
3555 ST(st_index) = ST0;
3556 ST0 = tmp;
3557}
3558
3559/* FPU operations */
3560
3561static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3562
3563void helper_fcom_ST0_FT0(void)
3564{
3565 int ret;
3566
3567 ret = floatx_compare(ST0, FT0, &env->fp_status);
3568 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3569 FORCE_RET();
3570}
3571
3572void helper_fucom_ST0_FT0(void)
3573{
3574 int ret;
3575
3576 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
 3577 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3578 FORCE_RET();
3579}
3580
3581static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3582
3583void helper_fcomi_ST0_FT0(void)
3584{
3585 int eflags;
3586 int ret;
3587
3588 ret = floatx_compare(ST0, FT0, &env->fp_status);
3589 eflags = cc_table[CC_OP].compute_all();
3590 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3591 CC_SRC = eflags;
3592 FORCE_RET();
3593}
3594
3595void helper_fucomi_ST0_FT0(void)
3596{
3597 int eflags;
3598 int ret;
3599
3600 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3601 eflags = cc_table[CC_OP].compute_all();
3602 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3603 CC_SRC = eflags;
3604 FORCE_RET();
3605}
3606
3607void helper_fadd_ST0_FT0(void)
3608{
3609 ST0 += FT0;
3610}
3611
3612void helper_fmul_ST0_FT0(void)
3613{
3614 ST0 *= FT0;
3615}
3616
3617void helper_fsub_ST0_FT0(void)
3618{
3619 ST0 -= FT0;
3620}
3621
3622void helper_fsubr_ST0_FT0(void)
3623{
3624 ST0 = FT0 - ST0;
3625}
3626
3627void helper_fdiv_ST0_FT0(void)
3628{
3629 ST0 = helper_fdiv(ST0, FT0);
3630}
3631
3632void helper_fdivr_ST0_FT0(void)
3633{
3634 ST0 = helper_fdiv(FT0, ST0);
3635}
3636
3637/* fp operations between STN and ST0 */
3638
3639void helper_fadd_STN_ST0(int st_index)
3640{
3641 ST(st_index) += ST0;
3642}
3643
3644void helper_fmul_STN_ST0(int st_index)
3645{
3646 ST(st_index) *= ST0;
3647}
3648
3649void helper_fsub_STN_ST0(int st_index)
3650{
3651 ST(st_index) -= ST0;
3652}
3653
3654void helper_fsubr_STN_ST0(int st_index)
3655{
3656 CPU86_LDouble *p;
3657 p = &ST(st_index);
3658 *p = ST0 - *p;
3659}
3660
3661void helper_fdiv_STN_ST0(int st_index)
3662{
3663 CPU86_LDouble *p;
3664 p = &ST(st_index);
3665 *p = helper_fdiv(*p, ST0);
3666}
3667
3668void helper_fdivr_STN_ST0(int st_index)
3669{
3670 CPU86_LDouble *p;
3671 p = &ST(st_index);
3672 *p = helper_fdiv(ST0, *p);
3673}
3674
3675/* misc FPU operations */
3676void helper_fchs_ST0(void)
3677{
3678 ST0 = floatx_chs(ST0);
3679}
3680
3681void helper_fabs_ST0(void)
3682{
3683 ST0 = floatx_abs(ST0);
3684}
3685
3686void helper_fld1_ST0(void)
3687{
3688 ST0 = f15rk[1];
3689}
3690
3691void helper_fldl2t_ST0(void)
3692{
3693 ST0 = f15rk[6];
3694}
3695
3696void helper_fldl2e_ST0(void)
3697{
3698 ST0 = f15rk[5];
3699}
3700
3701void helper_fldpi_ST0(void)
3702{
3703 ST0 = f15rk[2];
3704}
3705
3706void helper_fldlg2_ST0(void)
3707{
3708 ST0 = f15rk[3];
3709}
3710
3711void helper_fldln2_ST0(void)
3712{
3713 ST0 = f15rk[4];
3714}
3715
3716void helper_fldz_ST0(void)
3717{
3718 ST0 = f15rk[0];
3719}
3720
3721void helper_fldz_FT0(void)
3722{
3723 FT0 = f15rk[0];
3724}
3725
3726uint32_t helper_fnstsw(void)
3727{
3728 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3729}
3730
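/* The x87 status word keeps the top-of-stack index in bits 13..11; FNSTSW
 * above merges the live fpstt into the stored flags.  The packing and its
 * inverse, as a pair of sketches:
 */
#include <stdint.h>

static uint16_t fsw_with_top(uint16_t fpus, unsigned int fpstt)
{
    return (fpus & ~0x3800) | ((fpstt & 0x7) << 11);
}

static unsigned int fsw_top(uint16_t fsw)
{
    return (fsw >> 11) & 0x7;
}
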
3731uint32_t helper_fnstcw(void)
3732{
3733 return env->fpuc;
3734}
3735
3736static void update_fp_status(void)
3737{
3738 int rnd_type;
3739
3740 /* set rounding mode */
3741 switch(env->fpuc & RC_MASK) {
3742 default:
3743 case RC_NEAR:
3744 rnd_type = float_round_nearest_even;
3745 break;
3746 case RC_DOWN:
3747 rnd_type = float_round_down;
3748 break;
3749 case RC_UP:
3750 rnd_type = float_round_up;
3751 break;
3752 case RC_CHOP:
3753 rnd_type = float_round_to_zero;
3754 break;
3755 }
3756 set_float_rounding_mode(rnd_type, &env->fp_status);
3757#ifdef FLOATX80
3758 switch((env->fpuc >> 8) & 3) {
3759 case 0:
3760 rnd_type = 32;
3761 break;
3762 case 2:
3763 rnd_type = 64;
3764 break;
3765 case 3:
3766 default:
3767 rnd_type = 80;
3768 break;
3769 }
3770 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3771#endif
3772}
3773
3774void helper_fldcw(uint32_t val)
3775{
3776 env->fpuc = val;
3777 update_fp_status();
3778}
3779
3780void helper_fclex(void)
3781{
3782 env->fpus &= 0x7f00;
3783}
3784
3785void helper_fwait(void)
3786{
3787 if (env->fpus & FPUS_SE)
3788 fpu_raise_exception();
3789 FORCE_RET();
3790}
3791
3792void helper_fninit(void)
3793{
3794 env->fpus = 0;
3795 env->fpstt = 0;
3796 env->fpuc = 0x37f;
3797 env->fptags[0] = 1;
3798 env->fptags[1] = 1;
3799 env->fptags[2] = 1;
3800 env->fptags[3] = 1;
3801 env->fptags[4] = 1;
3802 env->fptags[5] = 1;
3803 env->fptags[6] = 1;
3804 env->fptags[7] = 1;
3805}
3806
3807/* BCD ops */
3808
3809void helper_fbld_ST0(target_ulong ptr)
3810{
3811 CPU86_LDouble tmp;
3812 uint64_t val;
3813 unsigned int v;
3814 int i;
3815
3816 val = 0;
3817 for(i = 8; i >= 0; i--) {
3818 v = ldub(ptr + i);
3819 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3820 }
3821 tmp = val;
3822 if (ldub(ptr + 9) & 0x80)
3823 tmp = -tmp;
3824 fpush();
3825 ST0 = tmp;
3826}
3827
3828void helper_fbst_ST0(target_ulong ptr)
3829{
3830 int v;
3831 target_ulong mem_ref, mem_end;
3832 int64_t val;
3833
3834 val = floatx_to_int64(ST0, &env->fp_status);
3835 mem_ref = ptr;
3836 mem_end = mem_ref + 9;
3837 if (val < 0) {
3838 stb(mem_end, 0x80);
3839 val = -val;
3840 } else {
3841 stb(mem_end, 0x00);
3842 }
3843 while (mem_ref < mem_end) {
3844 if (val == 0)
3845 break;
3846 v = val % 100;
3847 val = val / 100;
3848 v = ((v / 10) << 4) | (v % 10);
3849 stb(mem_ref++, v);
3850 }
3851 while (mem_ref < mem_end) {
3852 stb(mem_ref++, 0);
3853 }
3854}
3855
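/* FBLD/FBST use 18-digit packed BCD: two decimal digits per byte in bytes
 * 0..8, sign in bit 7 of byte 9.  A standalone encode/decode pair over a
 * plain 10-byte buffer, mirroring the digit loops above:
 */
#include <stdint.h>

static int64_t bcd_decode(const uint8_t b[10])
{
    int64_t val = 0;
    int i;

    for (i = 8; i >= 0; i--)
        val = val * 100 + (b[i] >> 4) * 10 + (b[i] & 0xf);
    return (b[9] & 0x80) ? -val : val;
}

static void bcd_encode(int64_t val, uint8_t b[10])
{
    int i, v;

    b[9] = val < 0 ? 0x80 : 0x00;
    if (val < 0)
        val = -val;
    for (i = 0; i < 9; i++) {
        v = val % 100;
        val /= 100;
        b[i] = (uint8_t)(((v / 10) << 4) | (v % 10));
    }
}
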
3856void helper_f2xm1(void)
3857{
3858 ST0 = pow(2.0,ST0) - 1.0;
3859}
3860
3861void helper_fyl2x(void)
3862{
3863 CPU86_LDouble fptemp;
3864
3865 fptemp = ST0;
3866 if (fptemp>0.0){
3867 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3868 ST1 *= fptemp;
3869 fpop();
3870 } else {
3871 env->fpus &= (~0x4700);
3872 env->fpus |= 0x400;
3873 }
3874}
3875
3876void helper_fptan(void)
3877{
3878 CPU86_LDouble fptemp;
3879
3880 fptemp = ST0;
3881 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3882 env->fpus |= 0x400;
3883 } else {
3884 ST0 = tan(fptemp);
3885 fpush();
3886 ST0 = 1.0;
3887 env->fpus &= (~0x400); /* C2 <-- 0 */
 3888 /* the above code is for |arg| < 2**63 only */
3889 }
3890}
3891
3892void helper_fpatan(void)
3893{
3894 CPU86_LDouble fptemp, fpsrcop;
3895
3896 fpsrcop = ST1;
3897 fptemp = ST0;
3898 ST1 = atan2(fpsrcop,fptemp);
3899 fpop();
3900}
3901
3902void helper_fxtract(void)
3903{
3904 CPU86_LDoubleU temp;
3905 unsigned int expdif;
3906
3907 temp.d = ST0;
3908 expdif = EXPD(temp) - EXPBIAS;
3909 /*DP exponent bias*/
3910 ST0 = expdif;
3911 fpush();
3912 BIASEXPONENT(temp);
3913 ST0 = temp.d;
3914}
3915
3916void helper_fprem1(void)
3917{
3918 CPU86_LDouble dblq, fpsrcop, fptemp;
3919 CPU86_LDoubleU fpsrcop1, fptemp1;
3920 int expdif;
3921 signed long long int q;
3922
3923 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3924 ST0 = 0.0 / 0.0; /* NaN */
3925 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3926 return;
3927 }
3928
3929 fpsrcop = ST0;
3930 fptemp = ST1;
3931 fpsrcop1.d = fpsrcop;
3932 fptemp1.d = fptemp;
3933 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3934
3935 if (expdif < 0) {
3936 /* optimisation? taken from the AMD docs */
3937 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3938 /* ST0 is unchanged */
3939 return;
3940 }
3941
3942 if (expdif < 53) {
3943 dblq = fpsrcop / fptemp;
3944 /* round dblq towards nearest integer */
3945 dblq = rint(dblq);
3946 ST0 = fpsrcop - fptemp * dblq;
3947
3948 /* convert dblq to q by truncating towards zero */
3949 if (dblq < 0.0)
3950 q = (signed long long int)(-dblq);
3951 else
3952 q = (signed long long int)dblq;
3953
3954 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3955 /* (C0,C3,C1) <-- (q2,q1,q0) */
3956 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3957 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3958 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
3959 } else {
3960 env->fpus |= 0x400; /* C2 <-- 1 */
3961 fptemp = pow(2.0, expdif - 50);
3962 fpsrcop = (ST0 / ST1) / fptemp;
3963 /* fpsrcop = integer obtained by chopping */
3964 fpsrcop = (fpsrcop < 0.0) ?
3965 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3966 ST0 -= (ST1 * fpsrcop * fptemp);
3967 }
3968}
3969
3970void helper_fprem(void)
3971{
3972 CPU86_LDouble dblq, fpsrcop, fptemp;
3973 CPU86_LDoubleU fpsrcop1, fptemp1;
3974 int expdif;
3975 signed long long int q;
3976
3977 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3978 ST0 = 0.0 / 0.0; /* NaN */
3979 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3980 return;
3981 }
3982
3983 fpsrcop = (CPU86_LDouble)ST0;
3984 fptemp = (CPU86_LDouble)ST1;
3985 fpsrcop1.d = fpsrcop;
3986 fptemp1.d = fptemp;
3987 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3988
3989 if (expdif < 0) {
3990 /* optimisation? taken from the AMD docs */
3991 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3992 /* ST0 is unchanged */
3993 return;
3994 }
3995
3996 if ( expdif < 53 ) {
3997 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
3998 /* round dblq towards zero */
3999 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4000 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4001
4002 /* convert dblq to q by truncating towards zero */
4003 if (dblq < 0.0)
4004 q = (signed long long int)(-dblq);
4005 else
4006 q = (signed long long int)dblq;
4007
4008 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4009 /* (C0,C3,C1) <-- (q2,q1,q0) */
4010 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4011 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4012 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4013 } else {
4014 int N = 32 + (expdif % 32); /* as per AMD docs */
4015 env->fpus |= 0x400; /* C2 <-- 1 */
4016 fptemp = pow(2.0, (double)(expdif - N));
4017 fpsrcop = (ST0 / ST1) / fptemp;
4018 /* fpsrcop = integer obtained by chopping */
4019 fpsrcop = (fpsrcop < 0.0) ?
4020 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4021 ST0 -= (ST1 * fpsrcop * fptemp);
4022 }
4023}
4024
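/* FPREM/FPREM1 report the three low quotient bits through C0/C3/C1 so that
 * software can finish argument reduction by octant.  The bit scatter used
 * above, isolated:
 */
static unsigned int fprem_quotient_cc(unsigned long long q)
{
    unsigned int fpus = 0;

    fpus |= (q & 0x4) << (8 - 2);   /* C0 (bit 8)  <-- q2 */
    fpus |= (q & 0x2) << (14 - 1);  /* C3 (bit 14) <-- q1 */
    fpus |= (q & 0x1) << (9 - 0);   /* C1 (bit 9)  <-- q0 */
    return fpus;
}
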
4025void helper_fyl2xp1(void)
4026{
4027 CPU86_LDouble fptemp;
4028
4029 fptemp = ST0;
4030 if ((fptemp+1.0)>0.0) {
4031 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4032 ST1 *= fptemp;
4033 fpop();
4034 } else {
4035 env->fpus &= (~0x4700);
4036 env->fpus |= 0x400;
4037 }
4038}
4039
4040void helper_fsqrt(void)
4041{
4042 CPU86_LDouble fptemp;
4043
4044 fptemp = ST0;
4045 if (fptemp<0.0) {
4046 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4047 env->fpus |= 0x400;
4048 }
4049 ST0 = sqrt(fptemp);
4050}
4051
4052void helper_fsincos(void)
4053{
4054 CPU86_LDouble fptemp;
4055
4056 fptemp = ST0;
4057 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4058 env->fpus |= 0x400;
4059 } else {
4060 ST0 = sin(fptemp);
4061 fpush();
4062 ST0 = cos(fptemp);
4063 env->fpus &= (~0x400); /* C2 <-- 0 */
4064 /* the above code is for |arg| < 2**63 only */
4065 }
4066}
4067
4068void helper_frndint(void)
4069{
4070 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4071}
4072
4073void helper_fscale(void)
4074{
4075 ST0 = ldexp (ST0, (int)(ST1));
4076}
4077
4078void helper_fsin(void)
4079{
4080 CPU86_LDouble fptemp;
4081
4082 fptemp = ST0;
4083 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4084 env->fpus |= 0x400;
4085 } else {
4086 ST0 = sin(fptemp);
4087 env->fpus &= (~0x400); /* C2 <-- 0 */
 4088 /* the above code is for |arg| < 2**63 only */
4089 }
4090}
4091
4092void helper_fcos(void)
4093{
4094 CPU86_LDouble fptemp;
4095
4096 fptemp = ST0;
4097 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4098 env->fpus |= 0x400;
4099 } else {
4100 ST0 = cos(fptemp);
4101 env->fpus &= (~0x400); /* C2 <-- 0 */
 4102 /* the above code is for |arg| < 2**63 only */
4103 }
4104}
4105
4106void helper_fxam_ST0(void)
4107{
4108 CPU86_LDoubleU temp;
4109 int expdif;
4110
4111 temp.d = ST0;
4112
4113 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4114 if (SIGND(temp))
4115 env->fpus |= 0x200; /* C1 <-- 1 */
4116
4117 /* XXX: test fptags too */
4118 expdif = EXPD(temp);
4119 if (expdif == MAXEXPD) {
4120#ifdef USE_X86LDOUBLE
4121 if (MANTD(temp) == 0x8000000000000000ULL)
4122#else
4123 if (MANTD(temp) == 0)
4124#endif
4125 env->fpus |= 0x500 /*Infinity*/;
4126 else
4127 env->fpus |= 0x100 /*NaN*/;
4128 } else if (expdif == 0) {
4129 if (MANTD(temp) == 0)
4130 env->fpus |= 0x4000 /*Zero*/;
4131 else
4132 env->fpus |= 0x4400 /*Denormal*/;
4133 } else {
4134 env->fpus |= 0x400;
4135 }
4136}
4137
4138void helper_fstenv(target_ulong ptr, int data32)
4139{
4140 int fpus, fptag, exp, i;
4141 uint64_t mant;
4142 CPU86_LDoubleU tmp;
4143
4144 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4145 fptag = 0;
4146 for (i=7; i>=0; i--) {
4147 fptag <<= 2;
4148 if (env->fptags[i]) {
4149 fptag |= 3;
4150 } else {
4151 tmp.d = env->fpregs[i].d;
4152 exp = EXPD(tmp);
4153 mant = MANTD(tmp);
4154 if (exp == 0 && mant == 0) {
4155 /* zero */
4156 fptag |= 1;
4157 } else if (exp == 0 || exp == MAXEXPD
4158#ifdef USE_X86LDOUBLE
4159 || (mant & (1LL << 63)) == 0
4160#endif
4161 ) {
4162 /* NaNs, infinity, denormal */
4163 fptag |= 2;
4164 }
4165 }
4166 }
4167 if (data32) {
4168 /* 32 bit */
4169 stl(ptr, env->fpuc);
4170 stl(ptr + 4, fpus);
4171 stl(ptr + 8, fptag);
4172 stl(ptr + 12, 0); /* fpip */
4173 stl(ptr + 16, 0); /* fpcs */
4174 stl(ptr + 20, 0); /* fpoo */
4175 stl(ptr + 24, 0); /* fpos */
4176 } else {
4177 /* 16 bit */
4178 stw(ptr, env->fpuc);
4179 stw(ptr + 2, fpus);
4180 stw(ptr + 4, fptag);
4181 stw(ptr + 6, 0);
4182 stw(ptr + 8, 0);
4183 stw(ptr + 10, 0);
4184 stw(ptr + 12, 0);
4185 }
4186}
4187
4188void helper_fldenv(target_ulong ptr, int data32)
4189{
4190 int i, fpus, fptag;
4191
4192 if (data32) {
4193 env->fpuc = lduw(ptr);
4194 fpus = lduw(ptr + 4);
4195 fptag = lduw(ptr + 8);
4196 }
4197 else {
4198 env->fpuc = lduw(ptr);
4199 fpus = lduw(ptr + 2);
4200 fptag = lduw(ptr + 4);
4201 }
4202 env->fpstt = (fpus >> 11) & 7;
4203 env->fpus = fpus & ~0x3800;
4204 for(i = 0;i < 8; i++) {
4205 env->fptags[i] = ((fptag & 3) == 3);
4206 fptag >>= 2;
4207 }
4208}
4209
4210void helper_fsave(target_ulong ptr, int data32)
4211{
4212 CPU86_LDouble tmp;
4213 int i;
4214
4215 helper_fstenv(ptr, data32);
4216
4217 ptr += (14 << data32);
4218 for(i = 0;i < 8; i++) {
4219 tmp = ST(i);
4220 helper_fstt(tmp, ptr);
4221 ptr += 10;
4222 }
4223
4224 /* fninit */
4225 env->fpus = 0;
4226 env->fpstt = 0;
4227 env->fpuc = 0x37f;
4228 env->fptags[0] = 1;
4229 env->fptags[1] = 1;
4230 env->fptags[2] = 1;
4231 env->fptags[3] = 1;
4232 env->fptags[4] = 1;
4233 env->fptags[5] = 1;
4234 env->fptags[6] = 1;
4235 env->fptags[7] = 1;
4236}
4237
4238void helper_frstor(target_ulong ptr, int data32)
4239{
4240 CPU86_LDouble tmp;
4241 int i;
4242
4243 helper_fldenv(ptr, data32);
4244 ptr += (14 << data32);
4245
4246 for(i = 0;i < 8; i++) {
4247 tmp = helper_fldt(ptr);
4248 ST(i) = tmp;
4249 ptr += 10;
4250 }
4251}
4252
4253void helper_fxsave(target_ulong ptr, int data64)
4254{
4255 int fpus, fptag, i, nb_xmm_regs;
4256 CPU86_LDouble tmp;
4257 target_ulong addr;
4258
4259 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4260 fptag = 0;
4261 for(i = 0; i < 8; i++) {
4262 fptag |= (env->fptags[i] << i);
4263 }
4264 stw(ptr, env->fpuc);
4265 stw(ptr + 2, fpus);
4266 stw(ptr + 4, fptag ^ 0xff);
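/* Editor's note: unlike FSTENV, FXSAVE stores an abridged tag with a
   single bit per register (1 = occupied, 0 = empty); env->fptags[]
   keeps 1 for "empty", hence the ^ 0xff inversion above. */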
4267#ifdef TARGET_X86_64
4268 if (data64) {
4269 stq(ptr + 0x08, 0); /* rip */
4270 stq(ptr + 0x10, 0); /* rdp */
4271 } else
4272#endif
4273 {
4274 stl(ptr + 0x08, 0); /* eip */
4275 stl(ptr + 0x0c, 0); /* sel */
4276 stl(ptr + 0x10, 0); /* dp */
4277 stl(ptr + 0x14, 0); /* sel */
4278 }
4279
4280 addr = ptr + 0x20;
4281 for(i = 0;i < 8; i++) {
4282 tmp = ST(i);
4283 helper_fstt(tmp, addr);
4284 addr += 16;
4285 }
4286
4287 if (env->cr[4] & CR4_OSFXSR_MASK) {
4288 /* XXX: finish it */
4289 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4290 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4291 if (env->hflags & HF_CS64_MASK)
4292 nb_xmm_regs = 16;
4293 else
4294 nb_xmm_regs = 8;
4295 addr = ptr + 0xa0;
4296 for(i = 0; i < nb_xmm_regs; i++) {
4297 stq(addr, env->xmm_regs[i].XMM_Q(0));
4298 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4299 addr += 16;
4300 }
4301 }
4302}
4303
4304void helper_fxrstor(target_ulong ptr, int data64)
4305{
4306 int i, fpus, fptag, nb_xmm_regs;
4307 CPU86_LDouble tmp;
4308 target_ulong addr;
4309
4310 env->fpuc = lduw(ptr);
4311 fpus = lduw(ptr + 2);
4312 fptag = lduw(ptr + 4);
4313 env->fpstt = (fpus >> 11) & 7;
4314 env->fpus = fpus & ~0x3800;
4315 fptag ^= 0xff;
4316 for(i = 0;i < 8; i++) {
4317 env->fptags[i] = ((fptag >> i) & 1);
4318 }
4319
4320 addr = ptr + 0x20;
4321 for(i = 0;i < 8; i++) {
4322 tmp = helper_fldt(addr);
4323 ST(i) = tmp;
4324 addr += 16;
4325 }
4326
4327 if (env->cr[4] & CR4_OSFXSR_MASK) {
4328 /* XXX: finish it */
4329 env->mxcsr = ldl(ptr + 0x18);
4330 //ldl(ptr + 0x1c);
4331 if (env->hflags & HF_CS64_MASK)
4332 nb_xmm_regs = 16;
4333 else
4334 nb_xmm_regs = 8;
4335 addr = ptr + 0xa0;
4336 for(i = 0; i < nb_xmm_regs; i++) {
4337 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4338 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4339 addr += 16;
4340 }
4341 }
4342}
4343
4344#ifndef USE_X86LDOUBLE
4345
4346void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4347{
4348 CPU86_LDoubleU temp;
4349 int e;
4350
4351 temp.d = f;
4352 /* mantissa */
4353 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4354 /* exponent + sign */
4355 e = EXPD(temp) - EXPBIAS + 16383;
4356 e |= SIGND(temp) >> 16;
4357 *pexp = e;
4358}
4359
4360CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4361{
4362 CPU86_LDoubleU temp;
4363 int e;
4364 uint64_t ll;
4365
4366 /* XXX: handle overflow ? */
4367 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4368 e |= (upper >> 4) & 0x800; /* sign */
4369 ll = (mant >> 11) & ((1LL << 52) - 1);
4370#ifdef __arm__
4371 temp.l.upper = (e << 20) | (ll >> 32);
4372 temp.l.lower = ll;
4373#else
4374 temp.ll = ll | ((uint64_t)e << 52);
4375#endif
4376 return temp.d;
4377}
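/* Editor's sketch (values not from the original): for 1.0, which as an
   IEEE double has sign 0, biased exponent EXPBIAS (1023) and an
   all-zero 52-bit mantissa, cpu_get_fp80() yields
   mant = 0x8000000000000000 (the 80-bit format's explicit integer bit)
   and exp = 16383 (the extended-precision bias); cpu_set_fp80()
   performs the inverse mapping. */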
4378
4379#else
4380
4381void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4382{
4383 CPU86_LDoubleU temp;
4384
4385 temp.d = f;
4386 *pmant = temp.l.lower;
4387 *pexp = temp.l.upper;
4388}
4389
4390CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4391{
4392 CPU86_LDoubleU temp;
4393
4394 temp.l.upper = upper;
4395 temp.l.lower = mant;
4396 return temp.d;
4397}
4398#endif
4399
4400#ifdef TARGET_X86_64
4401
4402//#define DEBUG_MULDIV
4403
4404static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4405{
4406 *plow += a;
4407 /* carry test */
4408 if (*plow < a)
4409 (*phigh)++;
4410 *phigh += b;
4411}
4412
4413static void neg128(uint64_t *plow, uint64_t *phigh)
4414{
4415 *plow = ~ *plow;
4416 *phigh = ~ *phigh;
4417 add128(plow, phigh, 1, 0);
4418}
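/* Editor's note: two's-complement negation of a 128-bit value:
   complement both halves, then add 1 through add128() so a carry out
   of the low word propagates into the high word. */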
4419
4420/* return TRUE if overflow */
4421static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4422{
4423 uint64_t q, r, a1, a0;
4424 int i, qb, ab;
4425
4426 a0 = *plow;
4427 a1 = *phigh;
4428 if (a1 == 0) {
4429 q = a0 / b;
4430 r = a0 % b;
4431 *plow = q;
4432 *phigh = r;
4433 } else {
4434 if (a1 >= b)
4435 return 1;
4436 /* XXX: use a better algorithm */
4437 for(i = 0; i < 64; i++) {
4438 ab = a1 >> 63;
4439 a1 = (a1 << 1) | (a0 >> 63);
4440 if (ab || a1 >= b) {
4441 a1 -= b;
4442 qb = 1;
4443 } else {
4444 qb = 0;
4445 }
4446 a0 = (a0 << 1) | qb;
4447 }
4448#if defined(DEBUG_MULDIV)
4449 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4450 *phigh, *plow, b, a0, a1);
4451#endif
4452 *plow = a0;
4453 *phigh = a1;
4454 }
4455 return 0;
4456}
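#if 0
/* Editor's sketch (hypothetical driver, not part of the original):
   div64() divides the 128-bit value (high:low) by b one bit at a time,
   shifting the remainder into a1 and the quotient into a0.  Dividing
   a = 2**64 + 16 by 16: */
static void div64_example(void)
{
    uint64_t low = 16, high = 1;    /* dividend = (high << 64) | low */
    if (div64(&low, &high, 16) == 0) {
        /* low == 0x1000000000000001 (quotient), high == 0 (remainder) */
    }
    /* a nonzero return means the quotient does not fit in 64 bits,
       which helper_divq_EAX below turns into #DE */
}
#endif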
4457
4458/* return TRUE if overflow */
4459static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4460{
4461 int sa, sb;
4462 sa = ((int64_t)*phigh < 0);
4463 if (sa)
4464 neg128(plow, phigh);
4465 sb = (b < 0);
4466 if (sb)
4467 b = -b;
4468 if (div64(plow, phigh, b) != 0)
4469 return 1;
4470 if (sa ^ sb) {
4471 if (*plow > (1ULL << 63))
4472 return 1;
4473 *plow = - *plow;
4474 } else {
4475 if (*plow >= (1ULL << 63))
4476 return 1;
4477 }
4478 if (sa)
4479 *phigh = - *phigh;
4480 return 0;
4481}
4482
4483void helper_mulq_EAX_T0(target_ulong t0)
4484{
4485 uint64_t r0, r1;
4486
4487 mulu64(&r0, &r1, EAX, t0);
4488 EAX = r0;
4489 EDX = r1;
4490 CC_DST = r0;
4491 CC_SRC = r1;
4492}
4493
4494void helper_imulq_EAX_T0(target_ulong t0)
4495{
4496 uint64_t r0, r1;
4497
4498 muls64(&r0, &r1, EAX, t0);
4499 EAX = r0;
4500 EDX = r1;
4501 CC_DST = r0;
4502 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4503}
4504
4505target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4506{
4507 uint64_t r0, r1;
4508
4509 muls64(&r0, &r1, t0, t1);
4510 CC_DST = r0;
4511 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4512 return r0;
4513}
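/* Editor's note: a signed 128-bit product fits in 64 bits exactly when
   its high half equals the sign extension of the low half, so CC_SRC
   above is nonzero precisely when IMUL must set CF and OF. */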
4514
4515void helper_divq_EAX(target_ulong t0)
4516{
4517 uint64_t r0, r1;
4518 if (t0 == 0) {
4519 raise_exception(EXCP00_DIVZ);
4520 }
4521 r0 = EAX;
4522 r1 = EDX;
4523 if (div64(&r0, &r1, t0))
4524 raise_exception(EXCP00_DIVZ);
4525 EAX = r0;
4526 EDX = r1;
4527}
4528
4529void helper_idivq_EAX(target_ulong t0)
4530{
4531 uint64_t r0, r1;
4532 if (t0 == 0) {
4533 raise_exception(EXCP00_DIVZ);
4534 }
4535 r0 = EAX;
4536 r1 = EDX;
4537 if (idiv64(&r0, &r1, t0))
4538 raise_exception(EXCP00_DIVZ);
4539 EAX = r0;
4540 EDX = r1;
4541}
4542#endif
4543
4544void helper_hlt(void)
4545{
4546 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4547
4548 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4549 env->halted = 1;
4550 env->exception_index = EXCP_HLT;
4551 cpu_loop_exit();
4552}
4553
4554void helper_monitor(target_ulong ptr)
4555{
4556 if ((uint32_t)ECX != 0)
4557 raise_exception(EXCP0D_GPF);
4558 /* XXX: store address ? */
4559 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4560}
4561
4562void helper_mwait(void)
4563{
4564 if ((uint32_t)ECX != 0)
4565 raise_exception(EXCP0D_GPF);
4566 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4567 /* XXX: not complete but not completely erroneous */
4568 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4569 /* more than one CPU: do not sleep because another CPU may
4570 wake this one */
4571 } else {
4572 helper_hlt();
4573 }
4574}
4575
4576void helper_debug(void)
4577{
4578 env->exception_index = EXCP_DEBUG;
4579 cpu_loop_exit();
4580}
4581
4582void helper_raise_interrupt(int intno, int next_eip_addend)
4583{
4584 raise_interrupt(intno, 1, 0, next_eip_addend);
4585}
4586
4587void helper_raise_exception(int exception_index)
4588{
4589 raise_exception(exception_index);
4590}
4591
4592void helper_cli(void)
4593{
4594 env->eflags &= ~IF_MASK;
4595}
4596
4597void helper_sti(void)
4598{
4599 env->eflags |= IF_MASK;
4600}
4601
4602#if 0
4603/* vm86plus instructions */
4604void helper_cli_vm(void)
4605{
4606 env->eflags &= ~VIF_MASK;
4607}
4608
4609void helper_sti_vm(void)
4610{
4611 env->eflags |= VIF_MASK;
4612 if (env->eflags & VIP_MASK) {
4613 raise_exception(EXCP0D_GPF);
4614 }
4615}
4616#endif
4617
4618void helper_set_inhibit_irq(void)
4619{
4620 env->hflags |= HF_INHIBIT_IRQ_MASK;
4621}
4622
4623void helper_reset_inhibit_irq(void)
4624{
4625 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4626}
4627
4628void helper_boundw(target_ulong a0, int v)
4629{
4630 int low, high;
4631 low = ldsw(a0);
4632 high = ldsw(a0 + 2);
4633 v = (int16_t)v;
4634 if (v < low || v > high) {
4635 raise_exception(EXCP05_BOUND);
4636 }
4637 FORCE_RET();
4638}
4639
4640void helper_boundl(target_ulong a0, int v)
4641{
4642 int low, high;
4643 low = ldl(a0);
4644 high = ldl(a0 + 4);
4645 if (v < low || v > high) {
4646 raise_exception(EXCP05_BOUND);
4647 }
4648 FORCE_RET();
4649}
4650
4651static float approx_rsqrt(float a)
4652{
4653 return 1.0 / sqrt(a);
4654}
4655
4656static float approx_rcp(float a)
4657{
4658 return 1.0 / a;
4659}
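/* Editor's note: hardware RCPSS/RSQRTSS return only ~12-bit
   approximations; computing them in full host precision here is more
   accurate than real silicon, which well-behaved guests tolerate. */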
4660
4661#if !defined(CONFIG_USER_ONLY)
4662
4663#define MMUSUFFIX _mmu
4664
4665#define SHIFT 0
4666#include "softmmu_template.h"
4667
4668#define SHIFT 1
4669#include "softmmu_template.h"
4670
4671#define SHIFT 2
4672#include "softmmu_template.h"
4673
4674#define SHIFT 3
4675#include "softmmu_template.h"
4676
4677#endif
4678
4679/* try to fill the TLB and raise an exception on error. If retaddr is
4680 NULL, the function was called from C code (i.e. not from generated
4681 code or from helper.c) */
4682/* XXX: fix it to restore all registers */
4683void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4684{
4685 TranslationBlock *tb;
4686 int ret;
4687 unsigned long pc;
4688 CPUX86State *saved_env;
4689
4690 /* XXX: hack to restore env in all cases, even if not called from
4691 generated code */
4692 saved_env = env;
4693 env = cpu_single_env;
4694
4695 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4696 if (ret) {
4697 if (retaddr) {
4698 /* now we have a real cpu fault */
4699 pc = (unsigned long)retaddr;
4700 tb = tb_find_pc(pc);
4701 if (tb) {
4702 /* the PC is inside the translated code. It means that we have
4703 a virtual CPU fault */
4704 cpu_restore_state(tb, env, pc, NULL);
4705 }
4706 }
4707 raise_exception_err(env->exception_index, env->error_code);
4708 }
4709 env = saved_env;
4710}
4711
4712
4713/* Secure Virtual Machine helpers */
4714
4715#if defined(CONFIG_USER_ONLY)
4716
4717void helper_vmrun(void)
4718{
4719}
4720void helper_vmmcall(void)
4721{
4722}
4723void helper_vmload(void)
4724{
4725}
4726void helper_vmsave(void)
4727{
4728}
4729void helper_stgi(void)
4730{
4731}
4732void helper_clgi(void)
4733{
4734}
4735void helper_skinit(void)
4736{
4737}
4738void helper_invlpga(void)
4739{
4740}
4741void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4742{
4743}
4744void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4745{
4746}
4747
4748void helper_svm_check_io(uint32_t port, uint32_t param,
4749 uint32_t next_eip_addend)
4750{
4751}
4752#else
4753
4754 static inline void svm_save_seg(target_phys_addr_t addr,
4755 const SegmentCache *sc)
4756 {
4757 stw_phys(addr + offsetof(struct vmcb_seg, selector),
4758 sc->selector);
4759 stq_phys(addr + offsetof(struct vmcb_seg, base),
4760 sc->base);
4761 stl_phys(addr + offsetof(struct vmcb_seg, limit),
4762 sc->limit);
4763 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
4764 (sc->flags >> 8) | ((sc->flags >> 12) & 0x0f00));
4765}
4766
4767static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4768{
4769 unsigned int flags;
4770
4771 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4772 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4773 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4774 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4775 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4776}
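/* Editor's note: the VMCB stores segment attributes in AMD's packed
   12-bit form (type/S/DPL/P in bits 0-7, AVL/L/DB/G in bits 8-11);
   the shift pairs above translate between that layout and QEMU's
   descriptor-style sc->flags, which keeps the same fields at
   bits 8-15 and 20-23. */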
4777
4778 static inline void svm_load_seg_cache(target_phys_addr_t addr,
4779 CPUState *env, int seg_reg)
4780 {
4781 SegmentCache sc1, *sc = &sc1;
4782 svm_load_seg(addr, sc);
4783 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4784 sc->base, sc->limit, sc->flags);
4785}
4786
4787void helper_vmrun(void)
4788{
4789 target_ulong addr;
4790 uint32_t event_inj;
4791 uint32_t int_ctl;
4792
4793 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4794
4795 addr = EAX;
4796 if (loglevel & CPU_LOG_TB_IN_ASM)
4797 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
4798
4799 env->vm_vmcb = addr;
4800
4801 /* save the current CPU state in the hsave page */
4802 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4803 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4804
4805 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4806 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4807
4808 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4809 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4810 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4811 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4812 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
4813 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4814 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4815
4816 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4817 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4818
4819 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4820 &env->segs[R_ES]);
4821 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4822 &env->segs[R_CS]);
4823 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4824 &env->segs[R_SS]);
4825 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4826 &env->segs[R_DS]);
4827
4828 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
4829 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4830 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4831
4832 /* load the interception bitmaps so we do not need to access the
4833 vmcb in svm mode */
4834 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4835 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4836 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4837 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4838 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4839 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4840
4841 /* enable intercepts */
4842 env->hflags |= HF_SVMI_MASK;
4843
4844 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4845 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4846
4847 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4848 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4849
4850 /* clear exit_info_2 so we behave like the real hardware */
4851 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4852
4853 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4854 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4855 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4856 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4857 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4858 if (int_ctl & V_INTR_MASKING_MASK) {
4859 env->cr[8] = int_ctl & V_TPR_MASK;
4860 cpu_set_apic_tpr(env, env->cr[8]);
4861 if (env->eflags & IF_MASK)
4862 env->hflags |= HF_HIF_MASK;
4863 }
4864
4865#ifdef TARGET_X86_64
4866 env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
4867 env->hflags &= ~HF_LMA_MASK;
4868 if (env->efer & MSR_EFER_LMA)
4869 env->hflags |= HF_LMA_MASK;
4870#endif
4871 env->eflags = 0;
4872 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4873 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4874 CC_OP = CC_OP_EFLAGS;
4875
4876 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
4877 env, R_ES);
4878 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
4879 env, R_CS);
4880 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
4881 env, R_SS);
4882 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
4883 env, R_DS);
4884
4885 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4886 env->eip = EIP;
4887 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4888 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4889 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4890 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4891 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4892
4893 /* FIXME: guest state consistency checks */
4894
4895 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4896 case TLB_CONTROL_DO_NOTHING:
4897 break;
4898 case TLB_CONTROL_FLUSH_ALL_ASID:
4899 /* FIXME: this is not 100% correct but should work for now */
4900 tlb_flush(env, 1);
4901 break;
4902 }
4903
4904 helper_stgi();
4905
4906 /* maybe we need to inject an event */
4907 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
4908 if (event_inj & SVM_EVTINJ_VALID) {
4909 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4910 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4911 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
4912 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
4913
4914 if (loglevel & CPU_LOG_TB_IN_ASM)
4915 fprintf(logfile, "Injecting(%#hx): ", valid_err);
4916 /* FIXME: need to implement valid_err */
4917 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
4918 case SVM_EVTINJ_TYPE_INTR:
4919 env->exception_index = vector;
4920 env->error_code = event_inj_err;
4921 env->exception_is_int = 0;
4922 env->exception_next_eip = -1;
4923 if (loglevel & CPU_LOG_TB_IN_ASM)
4924 fprintf(logfile, "INTR");
4925 break;
4926 case SVM_EVTINJ_TYPE_NMI:
4927 env->exception_index = vector;
4928 env->error_code = event_inj_err;
4929 env->exception_is_int = 0;
4930 env->exception_next_eip = EIP;
4931 if (loglevel & CPU_LOG_TB_IN_ASM)
4932 fprintf(logfile, "NMI");
4933 break;
4934 case SVM_EVTINJ_TYPE_EXEPT:
4935 env->exception_index = vector;
4936 env->error_code = event_inj_err;
4937 env->exception_is_int = 0;
4938 env->exception_next_eip = -1;
4939 if (loglevel & CPU_LOG_TB_IN_ASM)
4940 fprintf(logfile, "EXEPT");
4941 break;
4942 case SVM_EVTINJ_TYPE_SOFT:
4943 env->exception_index = vector;
4944 env->error_code = event_inj_err;
4945 env->exception_is_int = 1;
4946 env->exception_next_eip = EIP;
4947 if (loglevel & CPU_LOG_TB_IN_ASM)
4948 fprintf(logfile, "SOFT");
4949 break;
4950 }
4951 if (loglevel & CPU_LOG_TB_IN_ASM)
4952 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
4953 }
4954 if ((int_ctl & V_IRQ_MASK) ||
4955 (env->intercept & (1ULL << (SVM_EXIT_INTR - SVM_EXIT_INTR)))) {
4956 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4957 }
4958
4959 cpu_loop_exit();
4960}
4961
4962void helper_vmmcall(void)
4963{
4964 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
4965 raise_exception(EXCP06_ILLOP);
4966}
4967
4968void helper_vmload(void)
4969{
4970 target_ulong addr;
4971 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
4972
4973 /* XXX: invalid in 32 bit */
4974 addr = EAX;
4975 if (loglevel & CPU_LOG_TB_IN_ASM)
4976 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4977 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4978 env->segs[R_FS].base);
4979
4980 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
4981 env, R_FS);
4982 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
4983 env, R_GS);
4984 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
4985 &env->tr);
4986 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
4987 &env->ldt);
4988
4989#ifdef TARGET_X86_64
4990 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
4991 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
4992 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
4993 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
4994#endif
4995 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
4996 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
4997 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
4998 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
4999}
5000
5001void helper_vmsave(void)
5002{
5003 target_ulong addr;
5004 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5005 addr = EAX;
5006 if (loglevel & CPU_LOG_TB_IN_ASM)
5007 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5008 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5009 env->segs[R_FS].base);
5010
5011 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5012 &env->segs[R_FS]);
5013 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5014 &env->segs[R_GS]);
5015 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5016 &env->tr);
5017 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5018 &env->ldt);
5019
5020#ifdef TARGET_X86_64
5021 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5022 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5023 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5024 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5025#endif
5026 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5027 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5028 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5029 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5030}
5031
5032void helper_stgi(void)
5033{
5034 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5035 env->hflags |= HF_GIF_MASK;
5036}
5037
5038void helper_clgi(void)
5039{
5040 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5041 env->hflags &= ~HF_GIF_MASK;
5042}
5043
5044void helper_skinit(void)
5045{
5046 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5047 /* XXX: not implemented */
5048 if (loglevel & CPU_LOG_TB_IN_ASM)
5049 fprintf(logfile,"skinit!\n");
5050 raise_exception(EXCP06_ILLOP);
5051}
5052
5053void helper_invlpga(void)
5054{
5055 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5056 tlb_flush(env, 0);
5057}
5058
5059void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5060{
5061 if (likely(!(env->hflags & HF_SVMI_MASK)))
5062 return;
5063 switch(type) {
5064 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5065 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5066 helper_vmexit(type, param);
5067 }
5068 break;
5069 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5070 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5071 helper_vmexit(type, param);
5072 }
5073 break;
5074 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5075 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5076 helper_vmexit(type, param);
5077 }
5078 break;
5079 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5080 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5081 helper_vmexit(type, param);
5082 }
5083 break;
5084 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5085 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5086 helper_vmexit(type, param);
5087 }
5088 break;
5089 case SVM_EXIT_MSR:
5090 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5091 /* FIXME: this should be read in at vmrun (faster this way?) */
5092 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5093 uint32_t t0, t1;
5094 switch((uint32_t)ECX) {
5095 case 0 ... 0x1fff:
5096 t0 = (ECX * 2) % 8;
5097 t1 = (ECX * 2) / 8; /* two intercept bits per MSR */
5098 break;
5099 case 0xc0000000 ... 0xc0001fff:
5100 t0 = (8192 + ECX - 0xc0000000) * 2;
5101 t1 = (t0 / 8);
5102 t0 %= 8;
5103 break;
5104 case 0xc0010000 ... 0xc0011fff:
5105 t0 = (16384 + ECX - 0xc0010000) * 2;
5106 t1 = (t0 / 8);
5107 t0 %= 8;
5108 break;
5109 default:
5110 helper_vmexit(type, param);
5111 t0 = 0;
5112 t1 = 0;
5113 break;
5114 }
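/* Editor's note: the MSR permission map allocates two intercept bits
   per MSR (read, then write) in three 2KB blocks, one per architected
   MSR range; t1 is the byte offset into the map, t0 the bit offset
   within that byte, and param selects the read or write bit. */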
5115 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5116 helper_vmexit(type, param);
5117 }
5118 break;
5119 default:
5120 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5121 helper_vmexit(type, param);
5122 }
5123 break;
5124 }
5125}
5126
5127void helper_svm_check_io(uint32_t port, uint32_t param,
5128 uint32_t next_eip_addend)
5129{
5130 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5131 /* FIXME: this should be read in at vmrun (faster this way?) */
5132 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5133 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
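/* Editor's note: bits 4-6 of param carry the access size in bytes, so
   mask covers one permission bit per byte of the access; lduw_phys()
   is used because the run of bits may straddle a byte boundary. */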
5134 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5135 /* next EIP */
5136 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5137 env->eip + next_eip_addend);
5138 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5139 }
5140 }
5141}
5142
5143/* Note: currently only 32 bits of exit_code are used */
5144void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5145{
5146 uint32_t int_ctl;
5147
5148 if (loglevel & CPU_LOG_TB_IN_ASM)
5149 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5150 exit_code, exit_info_1,
5151 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5152 EIP);
5153
5154 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5155 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5156 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5157 } else {
5158 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5159 }
5160
5161 /* Save the VM state in the vmcb */
5162 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5163 &env->segs[R_ES]);
5164 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5165 &env->segs[R_CS]);
5166 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5167 &env->segs[R_SS]);
5168 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5169 &env->segs[R_DS]);
5170
5171 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5172 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5173
5174 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5175 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5176
5177 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5178 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5179 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5180 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5181 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5182
5183 if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
5184 int_ctl &= ~V_TPR_MASK;
5185 int_ctl |= env->cr[8] & V_TPR_MASK;
5186 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5187 }
5188
5189 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5190 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5191 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5192 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5193 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5194 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5195 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5196
5197 /* Reload the host state from vm_hsave */
5198 env->hflags &= ~HF_HIF_MASK;
5199 env->hflags &= ~HF_SVMI_MASK;
5200 env->intercept = 0;
5201 env->intercept_exceptions = 0;
5202 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5203
5204 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5205 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5206
5207 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5208 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5209
5210 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5211 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5212 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5213 if (int_ctl & V_INTR_MASKING_MASK) {
5214 env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
5215 cpu_set_apic_tpr(env, env->cr[8]);
5216 }
5217 /* we need to set the efer after the crs so the hidden flags get set properly */
5218#ifdef TARGET_X86_64
5219 env->efer = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
5220 env->hflags &= ~HF_LMA_MASK;
5221 if (env->efer & MSR_EFER_LMA)
5222 env->hflags |= HF_LMA_MASK;
5223 /* XXX: should also emulate the VM_CR MSR */
5224 env->hflags &= ~HF_SVME_MASK;
5225 if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
5226 if (env->efer & MSR_EFER_SVME)
5227 env->hflags |= HF_SVME_MASK;
5228 } else {
5229 env->efer &= ~MSR_EFER_SVME;
5230 }
5231#endif
5232
5233 env->eflags = 0;
5234 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5235 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5236 CC_OP = CC_OP_EFLAGS;
5237
5238 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5239 env, R_ES);
5240 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5241 env, R_CS);
5242 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5243 env, R_SS);
5244 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5245 env, R_DS);
5246
5247 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5248 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5249 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5250
5251 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5252 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5253
5254 /* other setups */
5255 cpu_x86_set_cpl(env, 0);
5256 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5257 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5258
5259 helper_clgi();
5260 /* FIXME: Resets the current ASID register to zero (host ASID). */
5261
5262 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5263
5264 /* Clears the TSC_OFFSET inside the processor. */
5265
5266 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5267 from the page table indicated the host's CR3. If the PDPEs contain
5268 illegal state, the processor causes a shutdown. */
5269
5270 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5271 env->cr[0] |= CR0_PE_MASK;
5272 env->eflags &= ~VM_MASK;
5273
5274 /* Disables all breakpoints in the host DR7 register. */
5275
5276 /* Checks the reloaded host state for consistency. */
5277
5278 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5279 host's code segment or non-canonical (in the case of long mode), a
5280 #GP fault is delivered inside the host.) */
5281
5282 /* remove any pending exception */
5283 env->exception_index = -1;
5284 env->error_code = 0;
5285 env->old_exception = -1;
5286
5287 cpu_loop_exit();
5288}
5289
5290#endif
5291
5292/* MMX/SSE */
5293/* XXX: optimize by storing fptt and fptags in the static cpu state */
5294void helper_enter_mmx(void)
5295{
5296 env->fpstt = 0;
5297 *(uint32_t *)(env->fptags) = 0;
5298 *(uint32_t *)(env->fptags + 4) = 0;
5299}
5300
5301void helper_emms(void)
5302{
5303 /* set to empty state */
5304 *(uint32_t *)(env->fptags) = 0x01010101;
5305 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5306}
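/* Editor's note: env->fptags[] is one byte per register (1 = empty),
   so each pair of 32-bit stores above tags four registers at once:
   all zero ("in use") when entering MMX mode, 0x01 per byte ("empty")
   for EMMS. */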
5307
5308/* XXX: remove this helper */
5309void helper_movq(uint64_t *d, uint64_t *s)
5310{
5311 *d = *s;
5312}
5313
5314#define SHIFT 0
5315#include "ops_sse.h"
5316
5317#define SHIFT 1
5318#include "ops_sse.h"
5319
5320#define SHIFT 0
5321#include "helper_template.h"
5322#undef SHIFT
5323
5324#define SHIFT 1
5325#include "helper_template.h"
5326#undef SHIFT
5327
5328#define SHIFT 2
5329#include "helper_template.h"
5330#undef SHIFT
5331
5332#ifdef TARGET_X86_64
5333
5334#define SHIFT 3
5335#include "helper_template.h"
5336#undef SHIFT
5337
5338#endif
5339
5340/* bit operations */
5341target_ulong helper_bsf(target_ulong t0)
5342{
5343 int count;
5344 target_ulong res;
5345
5346 res = t0;
5347 count = 0;
5348 while ((res & 1) == 0) {
5349 count++;
5350 res >>= 1;
5351 }
5352 return count;
5353}
5354
5355target_ulong helper_bsr(target_ulong t0)
5356{
5357 int count;
5358 target_ulong res, mask;
5359
5360 res = t0;
5361 count = TARGET_LONG_BITS - 1;
5362 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5363 while ((res & mask) == 0) {
5364 count--;
5365 res <<= 1;
5366 }
5367 return count;
5368}
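#if 0
/* Editor's note: both loops above terminate only for a nonzero source;
   hardware BSF/BSR leave the destination undefined and set ZF for a
   zero source, a case assumed to be filtered out before these helpers
   run.  A hypothetical constant-time equivalent using GCC builtins: */
static target_ulong bsf_sketch(uint64_t t0) { return __builtin_ctzll(t0); }
static target_ulong bsr_sketch(uint64_t t0) { return 63 - __builtin_clzll(t0); }
#endif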
5369
5370
5371static int compute_all_eflags(void)
5372{
5373 return CC_SRC;
5374}
5375
5376static int compute_c_eflags(void)
5377{
5378 return CC_SRC & CC_C;
5379}
5380
5381CCTable cc_table[CC_OP_NB] = {
5382 [CC_OP_DYNAMIC] = { /* should never happen */ },
5383
5384 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
5385
5386 [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
5387 [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
5388 [CC_OP_MULL] = { compute_all_mull, compute_c_mull },
5389
5390 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
5391 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
5392 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
5393
5394 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
5395 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
5396 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
5397
5398 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
5399 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
5400 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
5401
5402 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
5403 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
5404 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
5405
5406 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
5407 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
5408 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
5409
5410 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
5411 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
5412 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
5413
5414 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
5415 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
5416 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
5417
5418 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
5419 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
5420 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
5421
5422 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
5423 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
5424 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
5425
5426#ifdef TARGET_X86_64
5427 [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },
5428
5429 [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },
5430
5431 [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },
5432
5433 [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },
5434
5435 [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },
5436
5437 [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },
5438
5439 [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },
5440
5441 [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },
5442
5443 [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },
5444
5445 [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
5446#endif
5447};
5448