/*
 * i386 helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

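/* The x86 PF flag is set when the low byte of a result contains an
   even number of set bits.  parity_table[] precomputes this for every
   byte value; a sketch of the equivalent computation (for reference
   only, not used by the code):

       parity_table[i] = (popcount(i) & 1) ? 0 : CC_P;
*/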
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
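/* RCL rotates through CF, so a 16-bit RCL permutes 17 bits and an
   8-bit RCL permutes 9 bits: the shift count (already masked to 5 bits
   by the CPU) is reduced modulo 17 or 9 via the tables above.  For
   example, a count of 18 on a 16-bit operand behaves like a count of 1. */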

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = cc_table[CC_OP].compute_all();
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
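/* e1/e2 are the low/high 32 bits of an 8-byte segment descriptor: e1
   holds limit bits 0-15 and base bits 0-15, while e2 holds base bits
   16-23 and 24-31, the type/DPL/present flags and limit bits 16-19.
   With DESC_G_MASK set the limit is in 4K pages, hence the
   (limit << 12) | 0xfff above. */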

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
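/* In a 32-bit TSS the privilege-level stacks are ESPn/SSn pairs
   starting at offsets 4, 12 and 20; in a 16-bit TSS they are SPn/SSn
   pairs starting at offsets 2, 6 and 10.  Both cases reduce to
   index = (dpl * 4 + 2) << shift, with shift = 1 for 32-bit and 0 for
   16-bit TSS types. */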

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is this correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* code segments must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

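/* Hardware task switch: save the current CPU state into the old TSS,
   then load the full register/segment state from the new one.  The
   busy-bit and NT/back-link handling below depends on whether the
   switch comes from JMP, CALL or IRET, following the IA-32
   task-switching rules. */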
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, read the TSS segment and load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        /* in a 16-bit TSS the segment selectors are packed 2 bytes apart */
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 2));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses beforehand */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        /* same 2-byte stride as the selector loads above */
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 2), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers that cannot fault first; the segment
       registers are reloaded below and may raise exceptions */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in the 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load only the selectors first, as loading the descriptors
           below may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

/* check if Port I/O is allowed in the TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check reads two bytes, so both must be within the limit */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
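/* Worked example: a 2-byte access to port 0x3f9 reads the I/O bitmap
   word at io_offset + (0x3f9 >> 3) = io_offset + 0x7f, shifts it right
   by 0x3f9 & 7 = 1 and tests mask 0x3: the bits for ports 0x3f9 and
   0x3fa must both be clear for the access to be allowed. */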

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
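/* SET_ESP only updates the bits selected by sp_mask: with a 16-bit
   stack segment (B bit clear) only SP changes and the upper bits of
   ESP are preserved, matching real CPU behaviour. */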

/* On 64-bit machines the ssp + sp addition below can overflow 32 bits,
 * so this segment addition macro is used to trim the value to 32 bits
 * whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

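/* Protected-mode interrupt/exception delivery.  For an inner-privilege
   transition the new stack receives, from higher to lower addresses:
   old SS, old ESP, EFLAGS, old CS, old EIP and optionally the error
   code; same-privilege delivery omits SS:ESP.  When interrupting vm86
   mode, the four data segment selectors are pushed first. */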
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do this check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

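/* In long mode IDT entries are 16 bytes and each gate carries a 3-bit
   IST index; a non-zero IST selects one of seven fixed stacks in the
   64-bit TSS (RSPn at offset 8*n + 4, ISTn at offset 8*(n+3) + 4),
   which is why get_rsp_from_tss(ist + 3) is used below. */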
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
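/* SYSCALL loads CS from STAR bits 47:32 (SS is that selector + 8).
   In long mode it saves the return RIP in RCX and the flags in R11,
   and takes the entry point from LSTAR (64-bit code) or CSTAR
   (compatibility mode); in legacy mode the entry point is the low
   32 bits of STAR. */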
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

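/* In real mode the IVT is an array of 4-byte offset:segment pairs at
   the IDT base, and only FLAGS, CS and IP are pushed (16 bits each);
   no error code or privilege checks are involved. */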
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, " code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

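/* x86 exception classes for double-fault detection: divide error (0)
   and the segment exceptions #TS (10), #NP (11), #SS (12) and #GP (13)
   are "contributory".  A contributory or page fault raised while
   delivering another contributory/page fault escalates to #DF (8),
   and a fault during #DF delivery is a triple fault. */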
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

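/* On SMM entry the CPU state is saved in the SMRAM state-save area
   ending at smbase + 0xffff (the 0x7exx/0x7fxx offsets below are
   relative to sm_state = smbase + 0x8000), and execution resumes at
   smbase + 0x8000 in a flat real-mode-like environment; RSM restores
   the saved state.  The offsets used here appear to follow the AMD64
   (64-bit) and classic 32-bit SMM state-save maps. */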
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */

/* division, flags are undefined */
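/* Note: x86 DIV/IDIV raise #DE both on division by zero and when the
   quotient does not fit in the destination register, so EXCP00_DIVZ
   is used for both cases below. */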
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: missing #DE exception when base == 0 (AAM) */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

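/* AAA/AAS adjust AL after an ASCII/BCD add or subtract: if the low
   nibble is above 9 or AF is set, AL is corrected by +/-6 and AH by
   +/-1 (plus a carry out of AL), with AF and CF set.  Example: after
   an ADD leaving AL = 0x0b, AAA yields AL = 0x01, AH += 1, CF = AF = 1. */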
void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = cc_table[CC_OP].compute_all();
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

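/* CMPXCHG8B/16B compare EDX:EAX (RDX:RAX) with the memory operand and
   either store ECX:EBX (RCX:RBX) on a match or load the old value into
   EDX:EAX (RDX:RAX).  The store on the failure path is intentional:
   the instruction always performs a write cycle, which matters for
   MMIO and for atomicity on real hardware. */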
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = cc_table[CC_OP].compute_all();
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

void helper_cpuid(void)
{
    uint32_t index;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    index = (uint32_t)EAX;
    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        switch (ECX) {
        case 0: /* L1 dcache info */
            EAX = 0x0000121;
            EBX = 0x1c0003f;
            ECX = 0x000003f;
            EDX = 0x0000001;
            break;
        case 1: /* L1 icache info */
            EAX = 0x0000122;
            EBX = 0x1c0003f;
            ECX = 0x000003f;
            EDX = 0x0000001;
            break;
        case 2: /* L2 cache info */
            EAX = 0x0000143;
            EBX = 0x3c0003f;
            ECX = 0x0000fff;
            EDX = 0x0000001;
            break;
        default: /* end of info */
            EAX = 0;
            EBX = 0;
            ECX = 0;
            EDX = 0;
            break;
        }

        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        EAX = 0; /* Smallest monitor-line size in bytes */
        EBX = 0; /* Largest monitor-line size in bytes */
        ECX = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        EDX = 0;
        break;
1959 case 0x80000000:
1960 EAX = env->cpuid_xlevel;
1961 EBX = env->cpuid_vendor1;
1962 EDX = env->cpuid_vendor2;
1963 ECX = env->cpuid_vendor3;
1964 break;
1965 case 0x80000001:
1966 EAX = env->cpuid_features;
1967 EBX = 0;
1968 ECX = env->cpuid_ext3_features;
1969 EDX = env->cpuid_ext2_features;
1970 break;
1971 case 0x80000002:
1972 case 0x80000003:
1973 case 0x80000004:
1974 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1975 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1976 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1977 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1978 break;
1979 case 0x80000005:
1980 /* cache info (L1 cache) */
1981 EAX = 0x01ff01ff;
1982 EBX = 0x01ff01ff;
1983 ECX = 0x40020140;
1984 EDX = 0x40020140;
1985 break;
1986 case 0x80000006:
1987 /* cache info (L2 cache) */
1988 EAX = 0;
1989 EBX = 0x42004200;
1990 ECX = 0x02008140;
1991 EDX = 0;
1992 break;
1993 case 0x80000008:
1994 /* virtual & phys address size in low 2 bytes. */
1995/* XXX: This value must match the one used in the MMU code. */
1996 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
1997 /* 64 bit processor */
1998#if defined(USE_KQEMU)
1999 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
2000#else
2001/* XXX: The physical address space is limited to 42 bits in exec.c. */
2002 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
2003#endif
2004 } else {
2005#if defined(USE_KQEMU)
2006 EAX = 0x00000020; /* 32 bits physical */
2007#else
2008 EAX = 0x00000024; /* 36 bits physical */
2009#endif
2010 }
2011 EBX = 0;
2012 ECX = 0;
2013 EDX = 0;
2014 break;
2015 case 0x8000000A:
2016 EAX = 0x00000001;
2017 EBX = 0;
2018 ECX = 0;
2019 EDX = 0;
2020 break;
2021 default:
2022 /* reserved values: zero */
2023 EAX = 0;
2024 EBX = 0;
2025 ECX = 0;
2026 EDX = 0;
2027 break;
2028 }
2029}
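/* Illustrative sketch (assumes <string.h> and a little-endian host, which
   any x86 guest implies): how a guest reassembles the 12-byte vendor
   string that leaf 0 above returns split across EBX, EDX, ECX -- in that
   order -- e.g. "GenuineIntel" or "AuthenticAMD". */
static void cpuid_vendor_string(uint32_t ebx, uint32_t edx, uint32_t ecx,
                                char out[13])
{
    memcpy(out + 0, &ebx, 4);
    memcpy(out + 4, &edx, 4);
    memcpy(out + 8, &ecx, 4);
    out[12] = '\0';
}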
2030
2031void helper_enter_level(int level, int data32, target_ulong t1)
2032{
2033 target_ulong ssp;
2034 uint32_t esp_mask, esp, ebp;
2035
2036 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2037 ssp = env->segs[R_SS].base;
2038 ebp = EBP;
2039 esp = ESP;
2040 if (data32) {
2041 /* 32 bit */
2042 esp -= 4;
2043 while (--level) {
2044 esp -= 4;
2045 ebp -= 4;
2046 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2047 }
2048 esp -= 4;
2049 stl(ssp + (esp & esp_mask), t1);
2050 } else {
2051 /* 16 bit */
2052 esp -= 2;
2053 while (--level) {
2054 esp -= 2;
2055 ebp -= 2;
2056 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2057 }
2058 esp -= 2;
2059 stw(ssp + (esp & esp_mask), t1);
2060 }
2061}
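/* Illustrative sketch, not the helper itself: the "display" that ENTER
   builds for nested procedures.  With nesting level N the new frame holds
   N-1 frame pointers copied from the enclosing frame plus the new frame
   pointer, so inner procedures can reach the locals of every enclosing
   one.  helper_enter_level() above performs the same copies through
   guest memory. */
static void copy_display(uint32_t *new_frame, const uint32_t *old_frame,
                         uint32_t new_ebp, int level)
{
    int i;
    for (i = 0; i < level - 1; i++)
        new_frame[i] = old_frame[i];  /* inherited frame pointers */
    new_frame[level - 1] = new_ebp;   /* pointer to this frame */
}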
2062
2063#ifdef TARGET_X86_64
2064void helper_enter64_level(int level, int data64, target_ulong t1)
2065{
2066 target_ulong esp, ebp;
2067 ebp = EBP;
2068 esp = ESP;
2069
2070 if (data64) {
2071 /* 64 bit */
2072 esp -= 8;
2073 while (--level) {
2074 esp -= 8;
2075 ebp -= 8;
2076 stq(esp, ldq(ebp));
2077 }
2078 esp -= 8;
2079 stq(esp, t1);
2080 } else {
2081 /* 16 bit */
2082 esp -= 2;
2083 while (--level) {
2084 esp -= 2;
2085 ebp -= 2;
2086 stw(esp, lduw(ebp));
2087 }
2088 esp -= 2;
2089 stw(esp, t1);
2090 }
2091}
2092#endif
2093
2094void helper_lldt(int selector)
2095{
2096 SegmentCache *dt;
2097 uint32_t e1, e2;
2098 int index, entry_limit;
2099 target_ulong ptr;
2100
2101 selector &= 0xffff;
2102 if ((selector & 0xfffc) == 0) {
2103 /* XXX: NULL selector case: invalid LDT */
2104 env->ldt.base = 0;
2105 env->ldt.limit = 0;
2106 } else {
2107 if (selector & 0x4)
2108 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2109 dt = &env->gdt;
2110 index = selector & ~7;
2111#ifdef TARGET_X86_64
2112 if (env->hflags & HF_LMA_MASK)
2113 entry_limit = 15;
2114 else
2115#endif
2116 entry_limit = 7;
2117 if ((index + entry_limit) > dt->limit)
2118 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2119 ptr = dt->base + index;
2120 e1 = ldl_kernel(ptr);
2121 e2 = ldl_kernel(ptr + 4);
2122 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2123 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2124 if (!(e2 & DESC_P_MASK))
2125 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2126#ifdef TARGET_X86_64
2127 if (env->hflags & HF_LMA_MASK) {
2128 uint32_t e3;
2129 e3 = ldl_kernel(ptr + 8);
2130 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2131 env->ldt.base |= (target_ulong)e3 << 32;
2132 } else
2133#endif
2134 {
2135 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2136 }
2137 }
2138 env->ldt.selector = selector;
2139}
2140
2141void helper_ltr(int selector)
2142{
2143 SegmentCache *dt;
2144 uint32_t e1, e2;
2145 int index, type, entry_limit;
2146 target_ulong ptr;
2147
2148 selector &= 0xffff;
2149 if ((selector & 0xfffc) == 0) {
2150 /* NULL selector case: invalid TR */
2151 env->tr.base = 0;
2152 env->tr.limit = 0;
2153 env->tr.flags = 0;
2154 } else {
2155 if (selector & 0x4)
2156 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2157 dt = &env->gdt;
2158 index = selector & ~7;
2159#ifdef TARGET_X86_64
2160 if (env->hflags & HF_LMA_MASK)
2161 entry_limit = 15;
2162 else
2163#endif
2164 entry_limit = 7;
2165 if ((index + entry_limit) > dt->limit)
2166 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2167 ptr = dt->base + index;
2168 e1 = ldl_kernel(ptr);
2169 e2 = ldl_kernel(ptr + 4);
2170 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2171 if ((e2 & DESC_S_MASK) ||
2172 (type != 1 && type != 9))
2173 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2174 if (!(e2 & DESC_P_MASK))
2175 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2176#ifdef TARGET_X86_64
2177 if (env->hflags & HF_LMA_MASK) {
2178 uint32_t e3, e4;
2179 e3 = ldl_kernel(ptr + 8);
2180 e4 = ldl_kernel(ptr + 12);
2181 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2182 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2183 load_seg_cache_raw_dt(&env->tr, e1, e2);
2184 env->tr.base |= (target_ulong)e3 << 32;
2185 } else
2186#endif
2187 {
2188 load_seg_cache_raw_dt(&env->tr, e1, e2);
2189 }
2190 e2 |= DESC_TSS_BUSY_MASK;
2191 stl_kernel(ptr + 4, e2);
2192 }
2193 env->tr.selector = selector;
2194}
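/* Illustrative sketch: how the two descriptor words e1/e2 loaded above
   encode base and limit.  This is what get_seg_base()/get_seg_limit()
   compute, shown here under the standard descriptor-table layout. */
static uint32_t desc_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
static uint32_t desc_limit(uint32_t e1, uint32_t e2)
{
    uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)              /* granularity: 4K units */
        limit = (limit << 12) | 0xfff;
    return limit;
}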
2195
2196/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2197void helper_load_seg(int seg_reg, int selector)
2198{
2199 uint32_t e1, e2;
2200 int cpl, dpl, rpl;
2201 SegmentCache *dt;
2202 int index;
2203 target_ulong ptr;
2204
2205 selector &= 0xffff;
2206 cpl = env->hflags & HF_CPL_MASK;
2207 if ((selector & 0xfffc) == 0) {
2208 /* null selector case */
2209 if (seg_reg == R_SS
2210#ifdef TARGET_X86_64
2211 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2212#endif
2213 )
2214 raise_exception_err(EXCP0D_GPF, 0);
2215 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2216 } else {
2217
2218 if (selector & 0x4)
2219 dt = &env->ldt;
2220 else
2221 dt = &env->gdt;
2222 index = selector & ~7;
2223 if ((index + 7) > dt->limit)
2224 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2225 ptr = dt->base + index;
2226 e1 = ldl_kernel(ptr);
2227 e2 = ldl_kernel(ptr + 4);
2228
2229 if (!(e2 & DESC_S_MASK))
2230 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2231 rpl = selector & 3;
2232 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2233 if (seg_reg == R_SS) {
2234 /* must be writable segment */
2235 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2236 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2237 if (rpl != cpl || dpl != cpl)
2238 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2239 } else {
2240 /* must be readable segment */
2241 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2242 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2243
2244 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2245 /* if not conforming code, test rights */
2246 if (dpl < cpl || dpl < rpl)
2247 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2248 }
2249 }
2250
2251 if (!(e2 & DESC_P_MASK)) {
2252 if (seg_reg == R_SS)
2253 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2254 else
2255 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2256 }
2257
2258 /* set the access bit if not already set */
2259 if (!(e2 & DESC_A_MASK)) {
2260 e2 |= DESC_A_MASK;
2261 stl_kernel(ptr + 4, e2);
2262 }
2263
2264 cpu_x86_load_seg_cache(env, seg_reg, selector,
2265 get_seg_base(e1, e2),
2266 get_seg_limit(e1, e2),
2267 e2);
2268#if 0
2269 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2270 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2271#endif
2272 }
2273}
2274
2275/* protected mode jump */
2276void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2277 int next_eip_addend)
2278{
2279 int gate_cs, type;
2280 uint32_t e1, e2, cpl, dpl, rpl, limit;
2281 target_ulong next_eip;
2282
2283 if ((new_cs & 0xfffc) == 0)
2284 raise_exception_err(EXCP0D_GPF, 0);
2285 if (load_segment(&e1, &e2, new_cs) != 0)
2286 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2287 cpl = env->hflags & HF_CPL_MASK;
2288 if (e2 & DESC_S_MASK) {
2289 if (!(e2 & DESC_CS_MASK))
2290 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2291 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2292 if (e2 & DESC_C_MASK) {
2293 /* conforming code segment */
2294 if (dpl > cpl)
2295 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2296 } else {
2297 /* non conforming code segment */
2298 rpl = new_cs & 3;
2299 if (rpl > cpl)
2300 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2301 if (dpl != cpl)
2302 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2303 }
2304 if (!(e2 & DESC_P_MASK))
2305 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2306 limit = get_seg_limit(e1, e2);
2307 if (new_eip > limit &&
2308 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2309 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2310 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2311 get_seg_base(e1, e2), limit, e2);
2312 EIP = new_eip;
2313 } else {
2314 /* jump to call or task gate */
2315 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2316 rpl = new_cs & 3;
2317 cpl = env->hflags & HF_CPL_MASK;
2318 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2319 switch(type) {
2320 case 1: /* 286 TSS */
2321 case 9: /* 386 TSS */
2322 case 5: /* task gate */
2323 if (dpl < cpl || dpl < rpl)
2324 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2325 next_eip = env->eip + next_eip_addend;
2326 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2327 CC_OP = CC_OP_EFLAGS;
2328 break;
2329 case 4: /* 286 call gate */
2330 case 12: /* 386 call gate */
2331 if ((dpl < cpl) || (dpl < rpl))
2332 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2333 if (!(e2 & DESC_P_MASK))
2334 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2335 gate_cs = e1 >> 16;
2336 new_eip = (e1 & 0xffff);
2337 if (type == 12)
2338 new_eip |= (e2 & 0xffff0000);
2339 if (load_segment(&e1, &e2, gate_cs) != 0)
2340 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2341 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2342 /* must be code segment */
2343 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2344 (DESC_S_MASK | DESC_CS_MASK)))
2345 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2346 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2347 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2348 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2349 if (!(e2 & DESC_P_MASK))
2350 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2351 limit = get_seg_limit(e1, e2);
2352 if (new_eip > limit)
2353 raise_exception_err(EXCP0D_GPF, 0);
2354 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2355 get_seg_base(e1, e2), limit, e2);
2356 EIP = new_eip;
2357 break;
2358 default:
2359 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2360 break;
2361 }
2362 }
2363}
2364
2365/* real mode call */
2366void helper_lcall_real(int new_cs, target_ulong new_eip1,
2367 int shift, int next_eip)
2368{
2369 int new_eip;
2370 uint32_t esp, esp_mask;
2371 target_ulong ssp;
2372
2373 new_eip = new_eip1;
2374 esp = ESP;
2375 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2376 ssp = env->segs[R_SS].base;
2377 if (shift) {
2378 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2379 PUSHL(ssp, esp, esp_mask, next_eip);
2380 } else {
2381 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2382 PUSHW(ssp, esp, esp_mask, next_eip);
2383 }
2384
2385 SET_ESP(esp, esp_mask);
2386 env->eip = new_eip;
2387 env->segs[R_CS].selector = new_cs;
2388 env->segs[R_CS].base = (new_cs << 4);
2389}
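/* Illustrative sketch: real-mode segmentation as used above.  A far call
   just pushes CS:IP and reloads both; the new code segment base is
   always selector << 4, with no descriptor table lookup or checks. */
static uint32_t real_mode_linear(uint16_t seg, uint16_t off)
{
    return ((uint32_t)seg << 4) + off;
}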
2390
2391/* protected mode call */
2392void helper_lcall_protected(int new_cs, target_ulong new_eip,
2393 int shift, int next_eip_addend)
2394{
2395 int new_stack, i;
2396 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2397 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2398 uint32_t val, limit, old_sp_mask;
2399 target_ulong ssp, old_ssp, next_eip;
2400
2401 next_eip = env->eip + next_eip_addend;
2402#ifdef DEBUG_PCALL
2403 if (loglevel & CPU_LOG_PCALL) {
2404 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2405 new_cs, (uint32_t)new_eip, shift);
2406 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2407 }
2408#endif
2409 if ((new_cs & 0xfffc) == 0)
2410 raise_exception_err(EXCP0D_GPF, 0);
2411 if (load_segment(&e1, &e2, new_cs) != 0)
2412 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2413 cpl = env->hflags & HF_CPL_MASK;
2414#ifdef DEBUG_PCALL
2415 if (loglevel & CPU_LOG_PCALL) {
2416 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2417 }
2418#endif
2419 if (e2 & DESC_S_MASK) {
2420 if (!(e2 & DESC_CS_MASK))
2421 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2422 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2423 if (e2 & DESC_C_MASK) {
2424 /* conforming code segment */
2425 if (dpl > cpl)
2426 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2427 } else {
2428 /* non conforming code segment */
2429 rpl = new_cs & 3;
2430 if (rpl > cpl)
2431 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2432 if (dpl != cpl)
2433 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2434 }
2435 if (!(e2 & DESC_P_MASK))
2436 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2437
2438#ifdef TARGET_X86_64
2439 /* XXX: check 16/32 bit cases in long mode */
2440 if (shift == 2) {
2441 target_ulong rsp;
2442 /* 64 bit case */
2443 rsp = ESP;
2444 PUSHQ(rsp, env->segs[R_CS].selector);
2445 PUSHQ(rsp, next_eip);
2446 /* from this point, not restartable */
2447 ESP = rsp;
2448 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2449 get_seg_base(e1, e2),
2450 get_seg_limit(e1, e2), e2);
2451 EIP = new_eip;
2452 } else
2453#endif
2454 {
2455 sp = ESP;
2456 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2457 ssp = env->segs[R_SS].base;
2458 if (shift) {
2459 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2460 PUSHL(ssp, sp, sp_mask, next_eip);
2461 } else {
2462 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2463 PUSHW(ssp, sp, sp_mask, next_eip);
2464 }
2465
2466 limit = get_seg_limit(e1, e2);
2467 if (new_eip > limit)
2468 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2469 /* from this point, not restartable */
2470 SET_ESP(sp, sp_mask);
2471 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2472 get_seg_base(e1, e2), limit, e2);
2473 EIP = new_eip;
2474 }
2475 } else {
2476 /* check gate type */
2477 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2478 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2479 rpl = new_cs & 3;
2480 switch(type) {
2481 case 1: /* available 286 TSS */
2482 case 9: /* available 386 TSS */
2483 case 5: /* task gate */
2484 if (dpl < cpl || dpl < rpl)
2485 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2486 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2487 CC_OP = CC_OP_EFLAGS;
2488 return;
2489 case 4: /* 286 call gate */
2490 case 12: /* 386 call gate */
2491 break;
2492 default:
2493 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2494 break;
2495 }
2496 shift = type >> 3;
2497
2498 if (dpl < cpl || dpl < rpl)
2499 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2500 /* check valid bit */
2501 if (!(e2 & DESC_P_MASK))
2502 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2503 selector = e1 >> 16;
2504 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2505 param_count = e2 & 0x1f;
2506 if ((selector & 0xfffc) == 0)
2507 raise_exception_err(EXCP0D_GPF, 0);
2508
2509 if (load_segment(&e1, &e2, selector) != 0)
2510 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2511 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2512 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2513 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2514 if (dpl > cpl)
2515 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2516 if (!(e2 & DESC_P_MASK))
2517 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2518
2519 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2520 /* to inner privilege */
2521 get_ss_esp_from_tss(&ss, &sp, dpl);
2522#ifdef DEBUG_PCALL
2523 if (loglevel & CPU_LOG_PCALL)
2524 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2525 ss, sp, param_count, ESP);
2526#endif
2527 if ((ss & 0xfffc) == 0)
2528 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2529 if ((ss & 3) != dpl)
2530 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2531 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2532 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2533 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2534 if (ss_dpl != dpl)
2535 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2536 if (!(ss_e2 & DESC_S_MASK) ||
2537 (ss_e2 & DESC_CS_MASK) ||
2538 !(ss_e2 & DESC_W_MASK))
2539 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2540 if (!(ss_e2 & DESC_P_MASK))
2541 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2542
2543 // push_size = ((param_count * 2) + 8) << shift;
2544
2545 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2546 old_ssp = env->segs[R_SS].base;
2547
2548 sp_mask = get_sp_mask(ss_e2);
2549 ssp = get_seg_base(ss_e1, ss_e2);
2550 if (shift) {
2551 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2552 PUSHL(ssp, sp, sp_mask, ESP);
2553 for(i = param_count - 1; i >= 0; i--) {
2554 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2555 PUSHL(ssp, sp, sp_mask, val);
2556 }
2557 } else {
2558 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2559 PUSHW(ssp, sp, sp_mask, ESP);
2560 for(i = param_count - 1; i >= 0; i--) {
2561 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2562 PUSHW(ssp, sp, sp_mask, val);
2563 }
2564 }
2565 new_stack = 1;
2566 } else {
2567 /* to same privilege */
2568 sp = ESP;
2569 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2570 ssp = env->segs[R_SS].base;
2571 // push_size = (4 << shift);
2572 new_stack = 0;
2573 }
2574
2575 if (shift) {
2576 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2577 PUSHL(ssp, sp, sp_mask, next_eip);
2578 } else {
2579 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2580 PUSHW(ssp, sp, sp_mask, next_eip);
2581 }
2582
2583 /* from this point, not restartable */
2584
2585 if (new_stack) {
2586 ss = (ss & ~3) | dpl;
2587 cpu_x86_load_seg_cache(env, R_SS, ss,
2588 ssp,
2589 get_seg_limit(ss_e1, ss_e2),
2590 ss_e2);
2591 }
2592
2593 selector = (selector & ~3) | dpl;
2594 cpu_x86_load_seg_cache(env, R_CS, selector,
2595 get_seg_base(e1, e2),
2596 get_seg_limit(e1, e2),
2597 e2);
2598 cpu_x86_set_cpl(env, dpl);
2599 SET_ESP(sp, sp_mask);
2600 EIP = offset;
2601 }
2602#ifdef USE_KQEMU
2603 if (kqemu_is_ok(env)) {
2604 env->exception_index = -1;
2605 cpu_loop_exit();
2606 }
2607#endif
2608}
2609
2610/* real and vm86 mode iret */
2611void helper_iret_real(int shift)
2612{
2613 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2614 target_ulong ssp;
2615 int eflags_mask;
2616
2617 sp_mask = 0xffff; /* XXX: use SS segment size? */
2618 sp = ESP;
2619 ssp = env->segs[R_SS].base;
2620 if (shift == 1) {
2621 /* 32 bits */
2622 POPL(ssp, sp, sp_mask, new_eip);
2623 POPL(ssp, sp, sp_mask, new_cs);
2624 new_cs &= 0xffff;
2625 POPL(ssp, sp, sp_mask, new_eflags);
2626 } else {
2627 /* 16 bits */
2628 POPW(ssp, sp, sp_mask, new_eip);
2629 POPW(ssp, sp, sp_mask, new_cs);
2630 POPW(ssp, sp, sp_mask, new_eflags);
2631 }
2632 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2633 load_seg_vm(R_CS, new_cs);
2634 env->eip = new_eip;
2635 if (env->eflags & VM_MASK)
2636 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2637 else
2638 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2639 if (shift == 0)
2640 eflags_mask &= 0xffff;
2641 load_eflags(new_eflags, eflags_mask);
2642 env->hflags2 &= ~HF2_NMI_MASK;
2643}
2644
2645static inline void validate_seg(int seg_reg, int cpl)
2646{
2647 int dpl;
2648 uint32_t e2;
2649
2650 /* XXX: on x86_64, we do not want to nullify FS and GS because
2651 they may still contain a valid base. I would be interested to
2652 know how a real x86_64 CPU behaves */
2653 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2654 (env->segs[seg_reg].selector & 0xfffc) == 0)
2655 return;
2656
2657 e2 = env->segs[seg_reg].flags;
2658 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2659 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2660 /* data or non conforming code segment */
2661 if (dpl < cpl) {
2662 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2663 }
2664 }
2665}
2666
2667/* protected mode iret */
2668static inline void helper_ret_protected(int shift, int is_iret, int addend)
2669{
2670 uint32_t new_cs, new_eflags, new_ss;
2671 uint32_t new_es, new_ds, new_fs, new_gs;
2672 uint32_t e1, e2, ss_e1, ss_e2;
2673 int cpl, dpl, rpl, eflags_mask, iopl;
2674 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2675
2676#ifdef TARGET_X86_64
2677 if (shift == 2)
2678 sp_mask = -1;
2679 else
2680#endif
2681 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2682 sp = ESP;
2683 ssp = env->segs[R_SS].base;
2684 new_eflags = 0; /* avoid warning */
2685#ifdef TARGET_X86_64
2686 if (shift == 2) {
2687 POPQ(sp, new_eip);
2688 POPQ(sp, new_cs);
2689 new_cs &= 0xffff;
2690 if (is_iret) {
2691 POPQ(sp, new_eflags);
2692 }
2693 } else
2694#endif
2695 if (shift == 1) {
2696 /* 32 bits */
2697 POPL(ssp, sp, sp_mask, new_eip);
2698 POPL(ssp, sp, sp_mask, new_cs);
2699 new_cs &= 0xffff;
2700 if (is_iret) {
2701 POPL(ssp, sp, sp_mask, new_eflags);
2702 if (new_eflags & VM_MASK)
2703 goto return_to_vm86;
2704 }
2705 } else {
2706 /* 16 bits */
2707 POPW(ssp, sp, sp_mask, new_eip);
2708 POPW(ssp, sp, sp_mask, new_cs);
2709 if (is_iret)
2710 POPW(ssp, sp, sp_mask, new_eflags);
2711 }
2712#ifdef DEBUG_PCALL
2713 if (loglevel & CPU_LOG_PCALL) {
2714 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2715 new_cs, new_eip, shift, addend);
2716 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2717 }
2718#endif
2719 if ((new_cs & 0xfffc) == 0)
2720 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2721 if (load_segment(&e1, &e2, new_cs) != 0)
2722 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2723 if (!(e2 & DESC_S_MASK) ||
2724 !(e2 & DESC_CS_MASK))
2725 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2726 cpl = env->hflags & HF_CPL_MASK;
2727 rpl = new_cs & 3;
2728 if (rpl < cpl)
2729 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2730 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2731 if (e2 & DESC_C_MASK) {
2732 if (dpl > rpl)
2733 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2734 } else {
2735 if (dpl != rpl)
2736 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2737 }
2738 if (!(e2 & DESC_P_MASK))
2739 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2740
2741 sp += addend;
2742 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2743 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2744 /* return to same privilege level */
2745 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2746 get_seg_base(e1, e2),
2747 get_seg_limit(e1, e2),
2748 e2);
2749 } else {
2750 /* return to different privilege level */
2751#ifdef TARGET_X86_64
2752 if (shift == 2) {
2753 POPQ(sp, new_esp);
2754 POPQ(sp, new_ss);
2755 new_ss &= 0xffff;
2756 } else
2757#endif
2758 if (shift == 1) {
2759 /* 32 bits */
2760 POPL(ssp, sp, sp_mask, new_esp);
2761 POPL(ssp, sp, sp_mask, new_ss);
2762 new_ss &= 0xffff;
2763 } else {
2764 /* 16 bits */
2765 POPW(ssp, sp, sp_mask, new_esp);
2766 POPW(ssp, sp, sp_mask, new_ss);
2767 }
2768#ifdef DEBUG_PCALL
2769 if (loglevel & CPU_LOG_PCALL) {
2770 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2771 new_ss, new_esp);
2772 }
2773#endif
2774 if ((new_ss & 0xfffc) == 0) {
2775#ifdef TARGET_X86_64
2776 /* NULL ss is allowed in long mode if cpl != 3 */
2777 /* XXX: test CS64 ? */
2778 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2779 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2780 0, 0xffffffff,
2781 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2782 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2783 DESC_W_MASK | DESC_A_MASK);
2784 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2785 } else
2786#endif
2787 {
2788 raise_exception_err(EXCP0D_GPF, 0);
2789 }
2790 } else {
2791 if ((new_ss & 3) != rpl)
2792 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2793 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2794 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2795 if (!(ss_e2 & DESC_S_MASK) ||
2796 (ss_e2 & DESC_CS_MASK) ||
2797 !(ss_e2 & DESC_W_MASK))
2798 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2799 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2800 if (dpl != rpl)
2801 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2802 if (!(ss_e2 & DESC_P_MASK))
2803 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2804 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2805 get_seg_base(ss_e1, ss_e2),
2806 get_seg_limit(ss_e1, ss_e2),
2807 ss_e2);
2808 }
2809
2810 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2811 get_seg_base(e1, e2),
2812 get_seg_limit(e1, e2),
2813 e2);
2814 cpu_x86_set_cpl(env, rpl);
2815 sp = new_esp;
2816#ifdef TARGET_X86_64
2817 if (env->hflags & HF_CS64_MASK)
2818 sp_mask = -1;
2819 else
2820#endif
2821 sp_mask = get_sp_mask(ss_e2);
2822
2823 /* validate data segments */
2824 validate_seg(R_ES, rpl);
2825 validate_seg(R_DS, rpl);
2826 validate_seg(R_FS, rpl);
2827 validate_seg(R_GS, rpl);
2828
2829 sp += addend;
2830 }
2831 SET_ESP(sp, sp_mask);
2832 env->eip = new_eip;
2833 if (is_iret) {
2834 /* NOTE: 'cpl' is the _old_ CPL */
2835 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2836 if (cpl == 0)
2837 eflags_mask |= IOPL_MASK;
2838 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2839 if (cpl <= iopl)
2840 eflags_mask |= IF_MASK;
2841 if (shift == 0)
2842 eflags_mask &= 0xffff;
2843 load_eflags(new_eflags, eflags_mask);
2844 }
2845 return;
2846
2847 return_to_vm86:
2848 POPL(ssp, sp, sp_mask, new_esp);
2849 POPL(ssp, sp, sp_mask, new_ss);
2850 POPL(ssp, sp, sp_mask, new_es);
2851 POPL(ssp, sp, sp_mask, new_ds);
2852 POPL(ssp, sp, sp_mask, new_fs);
2853 POPL(ssp, sp, sp_mask, new_gs);
2854
2855 /* modify processor state */
2856 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2857 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2858 load_seg_vm(R_CS, new_cs & 0xffff);
2859 cpu_x86_set_cpl(env, 3);
2860 load_seg_vm(R_SS, new_ss & 0xffff);
2861 load_seg_vm(R_ES, new_es & 0xffff);
2862 load_seg_vm(R_DS, new_ds & 0xffff);
2863 load_seg_vm(R_FS, new_fs & 0xffff);
2864 load_seg_vm(R_GS, new_gs & 0xffff);
2865
2866 env->eip = new_eip & 0xffff;
2867 ESP = new_esp;
2868}
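/* Illustrative sketch: which EFLAGS bits IRET may modify, as a function
   of the *old* privilege level, mirroring the logic at the end of
   helper_ret_protected() above. */
static int iret_eflags_mask(int cpl, int iopl, int shift)
{
    int mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
    if (cpl == 0)
        mask |= IOPL_MASK;   /* only ring 0 may change IOPL */
    if (cpl <= iopl)
        mask |= IF_MASK;     /* IF is writable only when CPL <= IOPL */
    if (shift == 0)
        mask &= 0xffff;      /* a 16-bit IRET touches FLAGS only */
    return mask;
}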
2869
2870void helper_iret_protected(int shift, int next_eip)
2871{
2872 int tss_selector, type;
2873 uint32_t e1, e2;
2874
2875 /* specific case for TSS */
2876 if (env->eflags & NT_MASK) {
2877#ifdef TARGET_X86_64
2878 if (env->hflags & HF_LMA_MASK)
2879 raise_exception_err(EXCP0D_GPF, 0);
2880#endif
2881 tss_selector = lduw_kernel(env->tr.base + 0);
2882 if (tss_selector & 4)
2883 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2884 if (load_segment(&e1, &e2, tss_selector) != 0)
2885 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2886 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2887 /* NOTE: we check both segment and busy TSS */
2888 if (type != 3)
2889 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2890 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2891 } else {
2892 helper_ret_protected(shift, 1, 0);
2893 }
2894 env->hflags2 &= ~HF2_NMI_MASK;
2895#ifdef USE_KQEMU
2896 if (kqemu_is_ok(env)) {
2897 CC_OP = CC_OP_EFLAGS;
2898 env->exception_index = -1;
2899 cpu_loop_exit();
2900 }
2901#endif
2902}
2903
2904void helper_lret_protected(int shift, int addend)
2905{
2906 helper_ret_protected(shift, 0, addend);
2907#ifdef USE_KQEMU
2908 if (kqemu_is_ok(env)) {
2909 env->exception_index = -1;
2910 cpu_loop_exit();
2911 }
2912#endif
2913}
2914
2915void helper_sysenter(void)
2916{
2917 if (env->sysenter_cs == 0) {
2918 raise_exception_err(EXCP0D_GPF, 0);
2919 }
2920 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2921 cpu_x86_set_cpl(env, 0);
2922 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2923 0, 0xffffffff,
2924 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2925 DESC_S_MASK |
2926 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2927 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2928 0, 0xffffffff,
2929 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2930 DESC_S_MASK |
2931 DESC_W_MASK | DESC_A_MASK);
2932 ESP = env->sysenter_esp;
2933 EIP = env->sysenter_eip;
2934}
2935
2936void helper_sysexit(void)
2937{
2938 int cpl;
2939
2940 cpl = env->hflags & HF_CPL_MASK;
2941 if (env->sysenter_cs == 0 || cpl != 0) {
2942 raise_exception_err(EXCP0D_GPF, 0);
2943 }
2944 cpu_x86_set_cpl(env, 3);
2945 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2946 0, 0xffffffff,
2947 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2948 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2949 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2950 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2951 0, 0xffffffff,
2952 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2953 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2954 DESC_W_MASK | DESC_A_MASK);
2955 ESP = ECX;
2956 EIP = EDX;
2957#ifdef USE_KQEMU
2958 if (kqemu_is_ok(env)) {
2959 env->exception_index = -1;
2960 cpu_loop_exit();
2961 }
2962#endif
2963}
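/* Illustrative sketch: all four flat selectors used by SYSENTER and
   SYSEXIT above derive from the single MSR_IA32_SYSENTER_CS value. */
static void sysenter_selectors(uint16_t sysenter_cs,
                               uint16_t *cs0, uint16_t *ss0,
                               uint16_t *cs3, uint16_t *ss3)
{
    *cs0 = sysenter_cs & 0xfffc;              /* ring 0 code (SYSENTER) */
    *ss0 = (sysenter_cs + 8) & 0xfffc;        /* ring 0 stack */
    *cs3 = ((sysenter_cs + 16) & 0xfffc) | 3; /* ring 3 code (SYSEXIT) */
    *ss3 = ((sysenter_cs + 24) & 0xfffc) | 3; /* ring 3 stack */
}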
2964
2965#if defined(CONFIG_USER_ONLY)
2966target_ulong helper_read_crN(int reg)
2967{
2968 return 0;
2969}
2970
2971void helper_write_crN(int reg, target_ulong t0)
2972{
2973}
2974#else
2975target_ulong helper_read_crN(int reg)
2976{
2977 target_ulong val;
2978
2979 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2980 switch(reg) {
2981 default:
2982 val = env->cr[reg];
2983 break;
2984 case 8:
2985 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2986 val = cpu_get_apic_tpr(env);
2987 } else {
2988 val = env->v_tpr;
2989 }
2990 break;
2991 }
2992 return val;
2993}
2994
2995void helper_write_crN(int reg, target_ulong t0)
2996{
2997 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2998 switch(reg) {
2999 case 0:
3000 cpu_x86_update_cr0(env, t0);
3001 break;
3002 case 3:
3003 cpu_x86_update_cr3(env, t0);
3004 break;
3005 case 4:
3006 cpu_x86_update_cr4(env, t0);
3007 break;
3008 case 8:
3009 if (!(env->hflags2 & HF2_VINTR_MASK)) {
3010 cpu_set_apic_tpr(env, t0);
3011 }
3012 env->v_tpr = t0 & 0x0f;
3013 break;
3014 default:
3015 env->cr[reg] = t0;
3016 break;
3017 }
3018}
3019#endif
3020
3021void helper_lmsw(target_ulong t0)
3022{
3023 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
3024 if already set to one. */
3025 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
3026 helper_write_crN(0, t0);
3027}
3028
3029void helper_clts(void)
3030{
3031 env->cr[0] &= ~CR0_TS_MASK;
3032 env->hflags &= ~HF_TS_MASK;
3033}
3034
3035/* XXX: do more */
3036void helper_movl_drN_T0(int reg, target_ulong t0)
3037{
3038 env->dr[reg] = t0;
3039}
3040
3041void helper_invlpg(target_ulong addr)
3042{
3043 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3044 tlb_flush_page(env, addr);
3045}
3046
3047void helper_rdtsc(void)
3048{
3049 uint64_t val;
3050
3051 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3052 raise_exception(EXCP0D_GPF);
3053 }
3054 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3055
3056 val = cpu_get_tsc(env) + env->tsc_offset;
3057 EAX = (uint32_t)(val);
3058 EDX = (uint32_t)(val >> 32);
3059}
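/* Illustrative sketch: the EDX:EAX split used by RDTSC above and by
   RDMSR below for any 64-bit result (assumes <stdint.h>). */
static void split_edx_eax(uint64_t val, uint32_t *eax, uint32_t *edx)
{
    *eax = (uint32_t)val;
    *edx = (uint32_t)(val >> 32);
}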
3060
3061void helper_rdpmc(void)
3062{
3063 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3064 raise_exception(EXCP0D_GPF);
3065 }
3066 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3067
3068 /* currently unimplemented */
3069 raise_exception_err(EXCP06_ILLOP, 0);
3070}
3071
3072#if defined(CONFIG_USER_ONLY)
3073void helper_wrmsr(void)
3074{
3075}
3076
3077void helper_rdmsr(void)
3078{
3079}
3080#else
3081void helper_wrmsr(void)
3082{
3083 uint64_t val;
3084
3085 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3086
3087 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3088
3089 switch((uint32_t)ECX) {
3090 case MSR_IA32_SYSENTER_CS:
3091 env->sysenter_cs = val & 0xffff;
3092 break;
3093 case MSR_IA32_SYSENTER_ESP:
3094 env->sysenter_esp = val;
3095 break;
3096 case MSR_IA32_SYSENTER_EIP:
3097 env->sysenter_eip = val;
3098 break;
3099 case MSR_IA32_APICBASE:
3100 cpu_set_apic_base(env, val);
3101 break;
3102 case MSR_EFER:
3103 {
3104 uint64_t update_mask;
3105 update_mask = 0;
3106 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3107 update_mask |= MSR_EFER_SCE;
3108 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3109 update_mask |= MSR_EFER_LME;
3110 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3111 update_mask |= MSR_EFER_FFXSR;
3112 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3113 update_mask |= MSR_EFER_NXE;
3114 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3115 update_mask |= MSR_EFER_SVME;
3116 cpu_load_efer(env, (env->efer & ~update_mask) |
3117 (val & update_mask));
3118 }
3119 break;
3120 case MSR_STAR:
3121 env->star = val;
3122 break;
3123 case MSR_PAT:
3124 env->pat = val;
3125 break;
3126 case MSR_VM_HSAVE_PA:
3127 env->vm_hsave = val;
3128 break;
3129 case MSR_IA32_PERF_STATUS:
3130 /* tsc_increment_by_tick */
3131 val = 1000ULL;
3132 /* CPU multiplier */
3133 val |= (((uint64_t)4ULL) << 40);
3134 break;
3135#ifdef TARGET_X86_64
3136 case MSR_LSTAR:
3137 env->lstar = val;
3138 break;
3139 case MSR_CSTAR:
3140 env->cstar = val;
3141 break;
3142 case MSR_FMASK:
3143 env->fmask = val;
3144 break;
3145 case MSR_FSBASE:
3146 env->segs[R_FS].base = val;
3147 break;
3148 case MSR_GSBASE:
3149 env->segs[R_GS].base = val;
3150 break;
3151 case MSR_KERNELGSBASE:
3152 env->kernelgsbase = val;
3153 break;
3154#endif
3155 default:
3156 /* XXX: exception ? */
3157 break;
3158 }
3159}
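/* Illustrative sketch: the MSR_EFER filtering above.  A guest may only
   toggle EFER bits whose features the virtual CPU advertises in CPUID;
   every other bit keeps its previous value. */
static uint64_t efer_filtered(uint64_t old_efer, uint64_t new_val,
                              uint64_t update_mask)
{
    return (old_efer & ~update_mask) | (new_val & update_mask);
}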
3160
3161void helper_rdmsr(void)
3162{
3163 uint64_t val;
3164
3165 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3166
3167 switch((uint32_t)ECX) {
3168 case MSR_IA32_SYSENTER_CS:
3169 val = env->sysenter_cs;
3170 break;
3171 case MSR_IA32_SYSENTER_ESP:
3172 val = env->sysenter_esp;
3173 break;
3174 case MSR_IA32_SYSENTER_EIP:
3175 val = env->sysenter_eip;
3176 break;
3177 case MSR_IA32_APICBASE:
3178 val = cpu_get_apic_base(env);
3179 break;
3180 case MSR_EFER:
3181 val = env->efer;
3182 break;
3183 case MSR_STAR:
3184 val = env->star;
3185 break;
3186 case MSR_PAT:
3187 val = env->pat;
3188 break;
3189 case MSR_VM_HSAVE_PA:
3190 val = env->vm_hsave;
3191 break;
3192#ifdef TARGET_X86_64
3193 case MSR_LSTAR:
3194 val = env->lstar;
3195 break;
3196 case MSR_CSTAR:
3197 val = env->cstar;
3198 break;
3199 case MSR_FMASK:
3200 val = env->fmask;
3201 break;
3202 case MSR_FSBASE:
3203 val = env->segs[R_FS].base;
3204 break;
3205 case MSR_GSBASE:
3206 val = env->segs[R_GS].base;
3207 break;
3208 case MSR_KERNELGSBASE:
3209 val = env->kernelgsbase;
3210 break;
3211#endif
3212#ifdef USE_KQEMU
3213 case MSR_QPI_COMMBASE:
3214 if (env->kqemu_enabled) {
3215 val = kqemu_comm_base;
3216 } else {
3217 val = 0;
3218 }
3219 break;
3220#endif
3221 default:
3222 /* XXX: exception ? */
3223 val = 0;
3224 break;
3225 }
3226 EAX = (uint32_t)(val);
3227 EDX = (uint32_t)(val >> 32);
3228}
3229#endif
3230
3231target_ulong helper_lsl(target_ulong selector1)
3232{
3233 unsigned int limit;
3234 uint32_t e1, e2, eflags, selector;
3235 int rpl, dpl, cpl, type;
3236
3237 selector = selector1 & 0xffff;
3238 eflags = cc_table[CC_OP].compute_all();
3239 if (load_segment(&e1, &e2, selector) != 0)
3240 goto fail;
3241 rpl = selector & 3;
3242 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3243 cpl = env->hflags & HF_CPL_MASK;
3244 if (e2 & DESC_S_MASK) {
3245 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3246 /* conforming */
3247 } else {
3248 if (dpl < cpl || dpl < rpl)
3249 goto fail;
3250 }
3251 } else {
3252 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3253 switch(type) {
3254 case 1:
3255 case 2:
3256 case 3:
3257 case 9:
3258 case 11:
3259 break;
3260 default:
3261 goto fail;
3262 }
3263 if (dpl < cpl || dpl < rpl) {
3264 fail:
3265 CC_SRC = eflags & ~CC_Z;
3266 return 0;
3267 }
3268 }
3269 limit = get_seg_limit(e1, e2);
3270 CC_SRC = eflags | CC_Z;
3271 return limit;
3272}
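/* Illustrative sketch: the system-descriptor types that the LSL switch
   above accepts (1/3 = available/busy 286 TSS, 2 = LDT, 9/11 =
   available/busy 386 TSS).  Anything else clears ZF rather than
   faulting. */
static int lsl_accepts_system_type(int type)
{
    switch (type) {
    case 1: case 2: case 3:
    case 9: case 11:
        return 1;
    default:
        return 0;
    }
}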
3273
3274target_ulong helper_lar(target_ulong selector1)
3275{
3276 uint32_t e1, e2, eflags, selector;
3277 int rpl, dpl, cpl, type;
3278
3279 selector = selector1 & 0xffff;
3280 eflags = cc_table[CC_OP].compute_all();
3281 if ((selector & 0xfffc) == 0)
3282 goto fail;
3283 if (load_segment(&e1, &e2, selector) != 0)
3284 goto fail;
3285 rpl = selector & 3;
3286 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3287 cpl = env->hflags & HF_CPL_MASK;
3288 if (e2 & DESC_S_MASK) {
3289 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3290 /* conforming */
3291 } else {
3292 if (dpl < cpl || dpl < rpl)
3293 goto fail;
3294 }
3295 } else {
3296 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3297 switch(type) {
3298 case 1:
3299 case 2:
3300 case 3:
3301 case 4:
3302 case 5:
3303 case 9:
3304 case 11:
3305 case 12:
3306 break;
3307 default:
3308 goto fail;
3309 }
3310 if (dpl < cpl || dpl < rpl) {
3311 fail:
3312 CC_SRC = eflags & ~CC_Z;
3313 return 0;
3314 }
3315 }
3316 CC_SRC = eflags | CC_Z;
3317 return e2 & 0x00f0ff00;
3318}
3319
3320void helper_verr(target_ulong selector1)
3321{
3322 uint32_t e1, e2, eflags, selector;
3323 int rpl, dpl, cpl;
3324
3325 selector = selector1 & 0xffff;
3326 eflags = cc_table[CC_OP].compute_all();
3327 if ((selector & 0xfffc) == 0)
3328 goto fail;
3329 if (load_segment(&e1, &e2, selector) != 0)
3330 goto fail;
3331 if (!(e2 & DESC_S_MASK))
3332 goto fail;
3333 rpl = selector & 3;
3334 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3335 cpl = env->hflags & HF_CPL_MASK;
3336 if (e2 & DESC_CS_MASK) {
3337 if (!(e2 & DESC_R_MASK))
3338 goto fail;
3339 if (!(e2 & DESC_C_MASK)) {
3340 if (dpl < cpl || dpl < rpl)
3341 goto fail;
3342 }
3343 } else {
3344 if (dpl < cpl || dpl < rpl) {
3345 fail:
3346 CC_SRC = eflags & ~CC_Z;
3347 return;
3348 }
3349 }
3350 CC_SRC = eflags | CC_Z;
3351}
3352
3353void helper_verw(target_ulong selector1)
3354{
3355 uint32_t e1, e2, eflags, selector;
3356 int rpl, dpl, cpl;
3357
3358 selector = selector1 & 0xffff;
3359 eflags = cc_table[CC_OP].compute_all();
3360 if ((selector & 0xfffc) == 0)
3361 goto fail;
3362 if (load_segment(&e1, &e2, selector) != 0)
3363 goto fail;
3364 if (!(e2 & DESC_S_MASK))
3365 goto fail;
3366 rpl = selector & 3;
3367 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3368 cpl = env->hflags & HF_CPL_MASK;
3369 if (e2 & DESC_CS_MASK) {
3370 goto fail;
3371 } else {
3372 if (dpl < cpl || dpl < rpl)
3373 goto fail;
3374 if (!(e2 & DESC_W_MASK)) {
3375 fail:
3376 CC_SRC = eflags & ~CC_Z;
3377 return;
3378 }
3379 }
3380 CC_SRC = eflags | CC_Z;
3381}
3382
3383/* x87 FPU helpers */
3384
3385static void fpu_set_exception(int mask)
3386{
3387 env->fpus |= mask;
3388 if (env->fpus & (~env->fpuc & FPUC_EM))
3389 env->fpus |= FPUS_SE | FPUS_B;
3390}
3391
3392static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3393{
3394 if (b == 0.0)
3395 fpu_set_exception(FPUS_ZE);
3396 return a / b;
3397}
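/* Illustrative sketch: the masked-exception test inside
   fpu_set_exception() above.  A status bit raises the summary (SE) and
   busy (B) bits only when its mask bit in the control word is clear;
   FPUC_EM covers the six exception-mask bits. */
static int fpu_status_unmasked(int fpus, int fpuc)
{
    return (fpus & (~fpuc & FPUC_EM)) != 0;
}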
3398
3399void fpu_raise_exception(void)
3400{
3401 if (env->cr[0] & CR0_NE_MASK) {
3402 raise_exception(EXCP10_COPR);
3403 }
3404#if !defined(CONFIG_USER_ONLY)
3405 else {
3406 cpu_set_ferr(env);
3407 }
3408#endif
3409}
3410
3411void helper_flds_FT0(uint32_t val)
3412{
3413 union {
3414 float32 f;
3415 uint32_t i;
3416 } u;
3417 u.i = val;
3418 FT0 = float32_to_floatx(u.f, &env->fp_status);
3419}
3420
3421void helper_fldl_FT0(uint64_t val)
3422{
3423 union {
3424 float64 f;
3425 uint64_t i;
3426 } u;
3427 u.i = val;
3428 FT0 = float64_to_floatx(u.f, &env->fp_status);
3429}
3430
3431void helper_fildl_FT0(int32_t val)
3432{
3433 FT0 = int32_to_floatx(val, &env->fp_status);
3434}
3435
3436void helper_flds_ST0(uint32_t val)
3437{
3438 int new_fpstt;
3439 union {
3440 float32 f;
3441 uint32_t i;
3442 } u;
3443 new_fpstt = (env->fpstt - 1) & 7;
3444 u.i = val;
3445 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3446 env->fpstt = new_fpstt;
3447 env->fptags[new_fpstt] = 0; /* validate stack entry */
3448}
3449
3450void helper_fldl_ST0(uint64_t val)
3451{
3452 int new_fpstt;
3453 union {
3454 float64 f;
3455 uint64_t i;
3456 } u;
3457 new_fpstt = (env->fpstt - 1) & 7;
3458 u.i = val;
3459 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3460 env->fpstt = new_fpstt;
3461 env->fptags[new_fpstt] = 0; /* validate stack entry */
3462}
3463
3464void helper_fildl_ST0(int32_t val)
3465{
3466 int new_fpstt;
3467 new_fpstt = (env->fpstt - 1) & 7;
3468 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3469 env->fpstt = new_fpstt;
3470 env->fptags[new_fpstt] = 0; /* validate stack entry */
3471}
3472
3473void helper_fildll_ST0(int64_t val)
3474{
3475 int new_fpstt;
3476 new_fpstt = (env->fpstt - 1) & 7;
3477 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3478 env->fpstt = new_fpstt;
3479 env->fptags[new_fpstt] = 0; /* validate stack entry */
3480}
3481
3482uint32_t helper_fsts_ST0(void)
3483{
3484 union {
3485 float32 f;
3486 uint32_t i;
3487 } u;
3488 u.f = floatx_to_float32(ST0, &env->fp_status);
3489 return u.i;
3490}
3491
3492uint64_t helper_fstl_ST0(void)
3493{
3494 union {
3495 float64 f;
3496 uint64_t i;
3497 } u;
3498 u.f = floatx_to_float64(ST0, &env->fp_status);
3499 return u.i;
3500}
3501
3502int32_t helper_fist_ST0(void)
3503{
3504 int32_t val;
3505 val = floatx_to_int32(ST0, &env->fp_status);
3506 if (val != (int16_t)val)
3507 val = -32768;
3508 return val;
3509}
3510
3511int32_t helper_fistl_ST0(void)
3512{
3513 int32_t val;
3514 val = floatx_to_int32(ST0, &env->fp_status);
3515 return val;
3516}
3517
3518int64_t helper_fistll_ST0(void)
3519{
3520 int64_t val;
3521 val = floatx_to_int64(ST0, &env->fp_status);
3522 return val;
3523}
3524
3525int32_t helper_fistt_ST0(void)
3526{
3527 int32_t val;
3528 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3529 if (val != (int16_t)val)
3530 val = -32768;
3531 return val;
3532}
3533
3534int32_t helper_fisttl_ST0(void)
3535{
3536 int32_t val;
3537 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3538 return val;
3539}
3540
3541int64_t helper_fisttll_ST0(void)
3542{
3543 int64_t val;
3544 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3545 return val;
3546}
3547
3548void helper_fldt_ST0(target_ulong ptr)
3549{
3550 int new_fpstt;
3551 new_fpstt = (env->fpstt - 1) & 7;
3552 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3553 env->fpstt = new_fpstt;
3554 env->fptags[new_fpstt] = 0; /* validate stack entry */
3555}
3556
3557void helper_fstt_ST0(target_ulong ptr)
3558{
3559 helper_fstt(ST0, ptr);
3560}
3561
3562void helper_fpush(void)
3563{
3564 fpush();
3565}
3566
3567void helper_fpop(void)
3568{
3569 fpop();
3570}
3571
3572void helper_fdecstp(void)
3573{
3574 env->fpstt = (env->fpstt - 1) & 7;
3575 env->fpus &= (~0x4700);
3576}
3577
3578void helper_fincstp(void)
3579{
3580 env->fpstt = (env->fpstt + 1) & 7;
3581 env->fpus &= (~0x4700);
3582}
3583
3584/* FPU move */
3585
3586void helper_ffree_STN(int st_index)
3587{
3588 env->fptags[(env->fpstt + st_index) & 7] = 1;
3589}
3590
3591void helper_fmov_ST0_FT0(void)
3592{
3593 ST0 = FT0;
3594}
3595
3596void helper_fmov_FT0_STN(int st_index)
3597{
3598 FT0 = ST(st_index);
3599}
3600
3601void helper_fmov_ST0_STN(int st_index)
3602{
3603 ST0 = ST(st_index);
3604}
3605
3606void helper_fmov_STN_ST0(int st_index)
3607{
3608 ST(st_index) = ST0;
3609}
3610
3611void helper_fxchg_ST0_STN(int st_index)
3612{
3613 CPU86_LDouble tmp;
3614 tmp = ST(st_index);
3615 ST(st_index) = ST0;
3616 ST0 = tmp;
3617}
3618
3619/* FPU operations */
3620
3621static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3622
3623void helper_fcom_ST0_FT0(void)
3624{
3625 int ret;
3626
3627 ret = floatx_compare(ST0, FT0, &env->fp_status);
3628 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3629 FORCE_RET();
3630}
3631
3632void helper_fucom_ST0_FT0(void)
3633{
3634 int ret;
3635
3636 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3637 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3638 FORCE_RET();
3639}
3640
3641static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3642
3643void helper_fcomi_ST0_FT0(void)
3644{
3645 int eflags;
3646 int ret;
3647
3648 ret = floatx_compare(ST0, FT0, &env->fp_status);
3649 eflags = cc_table[CC_OP].compute_all();
3650 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3651 CC_SRC = eflags;
3652 FORCE_RET();
3653}
3654
3655void helper_fucomi_ST0_FT0(void)
3656{
3657 int eflags;
3658 int ret;
3659
3660 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3661 eflags = cc_table[CC_OP].compute_all();
3662 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3663 CC_SRC = eflags;
3664 FORCE_RET();
3665}
3666
3667void helper_fadd_ST0_FT0(void)
3668{
3669 ST0 += FT0;
3670}
3671
3672void helper_fmul_ST0_FT0(void)
3673{
3674 ST0 *= FT0;
3675}
3676
3677void helper_fsub_ST0_FT0(void)
3678{
3679 ST0 -= FT0;
3680}
3681
3682void helper_fsubr_ST0_FT0(void)
3683{
3684 ST0 = FT0 - ST0;
3685}
3686
3687void helper_fdiv_ST0_FT0(void)
3688{
3689 ST0 = helper_fdiv(ST0, FT0);
3690}
3691
3692void helper_fdivr_ST0_FT0(void)
3693{
3694 ST0 = helper_fdiv(FT0, ST0);
3695}
3696
3697/* fp operations between STN and ST0 */
3698
3699void helper_fadd_STN_ST0(int st_index)
3700{
3701 ST(st_index) += ST0;
3702}
3703
3704void helper_fmul_STN_ST0(int st_index)
3705{
3706 ST(st_index) *= ST0;
3707}
3708
3709void helper_fsub_STN_ST0(int st_index)
3710{
3711 ST(st_index) -= ST0;
3712}
3713
3714void helper_fsubr_STN_ST0(int st_index)
3715{
3716 CPU86_LDouble *p;
3717 p = &ST(st_index);
3718 *p = ST0 - *p;
3719}
3720
3721void helper_fdiv_STN_ST0(int st_index)
3722{
3723 CPU86_LDouble *p;
3724 p = &ST(st_index);
3725 *p = helper_fdiv(*p, ST0);
3726}
3727
3728void helper_fdivr_STN_ST0(int st_index)
3729{
3730 CPU86_LDouble *p;
3731 p = &ST(st_index);
3732 *p = helper_fdiv(ST0, *p);
3733}
3734
3735/* misc FPU operations */
3736void helper_fchs_ST0(void)
3737{
3738 ST0 = floatx_chs(ST0);
3739}
3740
3741void helper_fabs_ST0(void)
3742{
3743 ST0 = floatx_abs(ST0);
3744}
3745
3746void helper_fld1_ST0(void)
3747{
3748 ST0 = f15rk[1];
3749}
3750
3751void helper_fldl2t_ST0(void)
3752{
3753 ST0 = f15rk[6];
3754}
3755
3756void helper_fldl2e_ST0(void)
3757{
3758 ST0 = f15rk[5];
3759}
3760
3761void helper_fldpi_ST0(void)
3762{
3763 ST0 = f15rk[2];
3764}
3765
3766void helper_fldlg2_ST0(void)
3767{
3768 ST0 = f15rk[3];
3769}
3770
3771void helper_fldln2_ST0(void)
3772{
3773 ST0 = f15rk[4];
3774}
3775
3776void helper_fldz_ST0(void)
3777{
3778 ST0 = f15rk[0];
3779}
3780
3781void helper_fldz_FT0(void)
3782{
3783 FT0 = f15rk[0];
3784}
3785
3786uint32_t helper_fnstsw(void)
3787{
3788 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3789}
3790
3791uint32_t helper_fnstcw(void)
3792{
3793 return env->fpuc;
3794}
3795
3796static void update_fp_status(void)
3797{
3798 int rnd_type;
3799
3800 /* set rounding mode */
3801 switch(env->fpuc & RC_MASK) {
3802 default:
3803 case RC_NEAR:
3804 rnd_type = float_round_nearest_even;
3805 break;
3806 case RC_DOWN:
3807 rnd_type = float_round_down;
3808 break;
3809 case RC_UP:
3810 rnd_type = float_round_up;
3811 break;
3812 case RC_CHOP:
3813 rnd_type = float_round_to_zero;
3814 break;
3815 }
3816 set_float_rounding_mode(rnd_type, &env->fp_status);
3817#ifdef FLOATX80
3818 switch((env->fpuc >> 8) & 3) {
3819 case 0:
3820 rnd_type = 32;
3821 break;
3822 case 2:
3823 rnd_type = 64;
3824 break;
3825 case 3:
3826 default:
3827 rnd_type = 80;
3828 break;
3829 }
3830 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3831#endif
3832}
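/* Illustrative sketch: decoding the control-word fields consumed by
   update_fp_status() above -- bits 10-11 select the rounding mode,
   bits 8-9 the significand precision (32/64/80 bits). */
static const char *fpuc_rounding_name(int fpuc)
{
    switch (fpuc & RC_MASK) {
    case RC_NEAR: return "round to nearest even";
    case RC_DOWN: return "round toward -infinity";
    case RC_UP:   return "round toward +infinity";
    default:      return "round toward zero";   /* RC_CHOP */
    }
}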
3833
3834void helper_fldcw(uint32_t val)
3835{
3836 env->fpuc = val;
3837 update_fp_status();
3838}
3839
3840void helper_fclex(void)
3841{
3842 env->fpus &= 0x7f00;
3843}
3844
3845void helper_fwait(void)
3846{
3847 if (env->fpus & FPUS_SE)
3848 fpu_raise_exception();
3849 FORCE_RET();
3850}
3851
3852void helper_fninit(void)
3853{
3854 env->fpus = 0;
3855 env->fpstt = 0;
3856 env->fpuc = 0x37f;
3857 env->fptags[0] = 1;
3858 env->fptags[1] = 1;
3859 env->fptags[2] = 1;
3860 env->fptags[3] = 1;
3861 env->fptags[4] = 1;
3862 env->fptags[5] = 1;
3863 env->fptags[6] = 1;
3864 env->fptags[7] = 1;
3865}
3866
3867/* BCD ops */
3868
3869void helper_fbld_ST0(target_ulong ptr)
3870{
3871 CPU86_LDouble tmp;
3872 uint64_t val;
3873 unsigned int v;
3874 int i;
3875
3876 val = 0;
3877 for(i = 8; i >= 0; i--) {
3878 v = ldub(ptr + i);
3879 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3880 }
3881 tmp = val;
3882 if (ldub(ptr + 9) & 0x80)
3883 tmp = -tmp;
3884 fpush();
3885 ST0 = tmp;
3886}
3887
3888void helper_fbst_ST0(target_ulong ptr)
3889{
3890 int v;
3891 target_ulong mem_ref, mem_end;
3892 int64_t val;
3893
3894 val = floatx_to_int64(ST0, &env->fp_status);
3895 mem_ref = ptr;
3896 mem_end = mem_ref + 9;
3897 if (val < 0) {
3898 stb(mem_end, 0x80);
3899 val = -val;
3900 } else {
3901 stb(mem_end, 0x00);
3902 }
3903 while (mem_ref < mem_end) {
3904 if (val == 0)
3905 break;
3906 v = val % 100;
3907 val = val / 100;
3908 v = ((v / 10) << 4) | (v % 10);
3909 stb(mem_ref++, v);
3910 }
3911 while (mem_ref < mem_end) {
3912 stb(mem_ref++, 0);
3913 }
3914}
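/* Illustrative sketch: the packed-BCD byte layout the two helpers above
   read and write -- two decimal digits per byte, low digit in the low
   nibble, with the sign carried in bit 7 of the tenth byte. */
static unsigned int bcd_pack_byte(int two_digits)   /* 0..99 */
{
    return ((two_digits / 10) << 4) | (two_digits % 10);
}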
3915
3916void helper_f2xm1(void)
3917{
3918 ST0 = pow(2.0,ST0) - 1.0;
3919}
3920
3921void helper_fyl2x(void)
3922{
3923 CPU86_LDouble fptemp;
3924
3925 fptemp = ST0;
3926 if (fptemp>0.0){
3927 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3928 ST1 *= fptemp;
3929 fpop();
3930 } else {
3931 env->fpus &= (~0x4700);
3932 env->fpus |= 0x400;
3933 }
3934}
3935
3936void helper_fptan(void)
3937{
3938 CPU86_LDouble fptemp;
3939
3940 fptemp = ST0;
3941 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3942 env->fpus |= 0x400;
3943 } else {
3944 ST0 = tan(fptemp);
3945 fpush();
3946 ST0 = 1.0;
3947 env->fpus &= (~0x400); /* C2 <-- 0 */
3948 /* the above code is for |arg| < 2**52 only */
3949 }
3950}
3951
3952void helper_fpatan(void)
3953{
3954 CPU86_LDouble fptemp, fpsrcop;
3955
3956 fpsrcop = ST1;
3957 fptemp = ST0;
3958 ST1 = atan2(fpsrcop,fptemp);
3959 fpop();
3960}
3961
3962void helper_fxtract(void)
3963{
3964 CPU86_LDoubleU temp;
3965 unsigned int expdif;
3966
3967 temp.d = ST0;
3968 expdif = EXPD(temp) - EXPBIAS;
3969 /*DP exponent bias*/
3970 ST0 = expdif;
3971 fpush();
3972 BIASEXPONENT(temp);
3973 ST0 = temp.d;
3974}
3975
3976void helper_fprem1(void)
3977{
3978 CPU86_LDouble dblq, fpsrcop, fptemp;
3979 CPU86_LDoubleU fpsrcop1, fptemp1;
3980 int expdif;
3981 signed long long int q;
3982
3983 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3984 ST0 = 0.0 / 0.0; /* NaN */
3985 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3986 return;
3987 }
3988
3989 fpsrcop = ST0;
3990 fptemp = ST1;
3991 fpsrcop1.d = fpsrcop;
3992 fptemp1.d = fptemp;
3993 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3994
3995 if (expdif < 0) {
3996 /* optimisation? taken from the AMD docs */
3997 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3998 /* ST0 is unchanged */
3999 return;
4000 }
4001
4002 if (expdif < 53) {
4003 dblq = fpsrcop / fptemp;
4004 /* round dblq towards nearest integer */
4005 dblq = rint(dblq);
4006 ST0 = fpsrcop - fptemp * dblq;
4007
4008 /* convert dblq to q by truncating towards zero */
4009 if (dblq < 0.0)
4010 q = (signed long long int)(-dblq);
4011 else
4012 q = (signed long long int)dblq;
4013
4014 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4015 /* (C0,C3,C1) <-- (q2,q1,q0) */
4016 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4017 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4018 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4019 } else {
4020 env->fpus |= 0x400; /* C2 <-- 1 */
4021 fptemp = pow(2.0, expdif - 50);
4022 fpsrcop = (ST0 / ST1) / fptemp;
4023 /* fpsrcop = integer obtained by chopping */
4024 fpsrcop = (fpsrcop < 0.0) ?
4025 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4026 ST0 -= (ST1 * fpsrcop * fptemp);
4027 }
4028}
4029
4030void helper_fprem(void)
4031{
4032 CPU86_LDouble dblq, fpsrcop, fptemp;
4033 CPU86_LDoubleU fpsrcop1, fptemp1;
4034 int expdif;
4035 signed long long int q;
4036
4037 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4038 ST0 = 0.0 / 0.0; /* NaN */
4039 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4040 return;
4041 }
4042
4043 fpsrcop = (CPU86_LDouble)ST0;
4044 fptemp = (CPU86_LDouble)ST1;
4045 fpsrcop1.d = fpsrcop;
4046 fptemp1.d = fptemp;
4047 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4048
4049 if (expdif < 0) {
4050 /* optimisation? taken from the AMD docs */
4051 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4052 /* ST0 is unchanged */
4053 return;
4054 }
4055
4056 if ( expdif < 53 ) {
4057 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4058 /* round dblq towards zero */
4059 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4060 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4061
4062 /* convert dblq to q by truncating towards zero */
4063 if (dblq < 0.0)
4064 q = (signed long long int)(-dblq);
4065 else
4066 q = (signed long long int)dblq;
4067
4068 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4069 /* (C0,C3,C1) <-- (q2,q1,q0) */
4070 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4071 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4072 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4073 } else {
4074 int N = 32 + (expdif % 32); /* as per AMD docs */
4075 env->fpus |= 0x400; /* C2 <-- 1 */
4076 fptemp = pow(2.0, (double)(expdif - N));
4077 fpsrcop = (ST0 / ST1) / fptemp;
4078 /* fpsrcop = integer obtained by chopping */
4079 fpsrcop = (fpsrcop < 0.0) ?
4080 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4081 ST0 -= (ST1 * fpsrcop * fptemp);
4082 }
4083}
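/* Illustrative sketch: where FPREM/FPREM1 place the low three quotient
   bits, exactly as in the two helpers above (C0 = bit 8, C1 = bit 9,
   C3 = bit 14 of the status word). */
static int fprem_cc_bits(int q_low3)   /* low three bits of the quotient */
{
    int fpus = 0;
    fpus |= (q_low3 & 0x4) << (8 - 2);    /* C0 <-- q2 */
    fpus |= (q_low3 & 0x2) << (14 - 1);   /* C3 <-- q1 */
    fpus |= (q_low3 & 0x1) << (9 - 0);    /* C1 <-- q0 */
    return fpus;
}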
4084
4085void helper_fyl2xp1(void)
4086{
4087 CPU86_LDouble fptemp;
4088
4089 fptemp = ST0;
4090 if ((fptemp+1.0)>0.0) {
4091 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4092 ST1 *= fptemp;
4093 fpop();
4094 } else {
4095 env->fpus &= (~0x4700);
4096 env->fpus |= 0x400;
4097 }
4098}
4099
4100void helper_fsqrt(void)
4101{
4102 CPU86_LDouble fptemp;
4103
4104 fptemp = ST0;
4105 if (fptemp<0.0) {
4106 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4107 env->fpus |= 0x400;
4108 }
4109 ST0 = sqrt(fptemp);
4110}
4111
4112void helper_fsincos(void)
4113{
4114 CPU86_LDouble fptemp;
4115
4116 fptemp = ST0;
4117 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4118 env->fpus |= 0x400;
4119 } else {
4120 ST0 = sin(fptemp);
4121 fpush();
4122 ST0 = cos(fptemp);
4123 env->fpus &= (~0x400); /* C2 <-- 0 */
4124 /* the above code is for |arg| < 2**63 only */
4125 }
4126}
4127
4128void helper_frndint(void)
4129{
4130 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4131}
4132
4133void helper_fscale(void)
4134{
4135 ST0 = ldexp (ST0, (int)(ST1));
4136}
4137
4138void helper_fsin(void)
4139{
4140 CPU86_LDouble fptemp;
4141
4142 fptemp = ST0;
4143 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4144 env->fpus |= 0x400;
4145 } else {
4146 ST0 = sin(fptemp);
4147 env->fpus &= (~0x400); /* C2 <-- 0 */
4148 /* the above code is for |arg| < 2**53 only */
4149 }
4150}
4151
4152void helper_fcos(void)
4153{
4154 CPU86_LDouble fptemp;
4155
4156 fptemp = ST0;
4157 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4158 env->fpus |= 0x400;
4159 } else {
4160 ST0 = cos(fptemp);
4161 env->fpus &= (~0x400); /* C2 <-- 0 */
4162 /* the above code is for |arg| < 2**63 only */
4163 }
4164}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}
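
/* FXAM class encoding in (C3,C2,C0), with C1 holding the sign:
     001 (0x100)  NaN
     010 (0x400)  normal finite number
     011 (0x500)  infinity
     100 (0x4000) zero
     110 (0x4400) denormal
   The "empty" class (101) is never reported here because the tag word is
   not consulted (see the XXX above). */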

void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
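
/* The image written above follows the FSTENV layout: a 28-byte block in
   32-bit mode (control, status, tag words, then the instruction/operand
   pointer fields, which this emulation leaves 0), or a packed 14-byte
   block in 16-bit mode.  Each register gets a 2-bit tag: 00 = valid,
   01 = zero, 10 = special (NaN/infinity/denormal), 11 = empty. */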

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}
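
/* FSAVE is FSTENV (14 or 28 bytes, hence the ptr += 14 << data32)
   followed by the eight stack registers in 10-byte extended format,
   after which the FPU is reset as FNINIT would leave it: control word
   0x37f (round to nearest, extended precision, all exceptions masked)
   and all tags empty. */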

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel */
    }

    addr = ptr + 0x20;
    for(i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}
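
/* FXSAVE area sketch, matching the offsets used above: 0x00 FCW,
   0x02 FSW, 0x04 abridged tag word (one bit per register; env->fptags
   stores 1 = empty while the abridged format stores 1 = valid, hence
   the ^ 0xff), 0x08 instruction pointer, 0x18 MXCSR, 0x20 ST0-ST7 at a
   16-byte stride, 0xa0 the XMM registers.  The XMM/MXCSR part is only
   written when CR4.OSFXSR is set. */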

void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0; i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}
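
/* When the host "long double" is only an IEEE double, the conversion to
   the 80-bit register image widens the 52-bit mantissa to 63 bits
   (<< 11), makes the implicit integer bit explicit (1LL << 63), and
   rebiases the exponent from 1023 to 16383; cpu_set_fp80 is the inverse,
   dropping the extra mantissa bits again. */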

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}
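
/* The slow path above is plain restoring shift-subtract division: each
   iteration shifts one dividend bit into the partial remainder a1 and
   produces one quotient bit in a0, so after 64 rounds a0 holds the
   quotient and a1 the remainder.  The early "a1 >= b" check rejects
   quotients that would not fit in 64 bits, which the callers below turn
   into #DE. */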

/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}
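
/* For IMUL the signed 128-bit product fits in 64 bits exactly when the
   high half equals the sign extension of the low half, so CC_SRC is set
   to that comparison and the lazy flags code derives CF/OF from it;
   MUL instead keeps the raw high half in CC_SRC (non-zero means
   overflow). */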

void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}
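
/* BOUND reads a signed (lower, upper) pair from memory and raises #BR
   (EXCP05_BOUND) when the index register is outside the inclusive range;
   the 16-bit form sign-extends its operand first, which is why v is
   narrowed through int16_t above. */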

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
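
/* On a miss, cpu_x86_handle_mmu_fault() walks the guest page tables; if
   the access really faults, the host return address is mapped back to
   its translation block via tb_find_pc() so cpu_restore_state() can
   resynchronize EIP before the fault is re-raised as a guest
   exception. */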

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
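
/* The VMCB stores segment attributes in a packed 12-bit form: bits 0-7
   are descriptor bits 8-15 (type, S, DPL, P) and bits 8-11 are
   descriptor bits 20-23 (AVL, L, D/B, G).  The shift pairs above convert
   between that packing and the flat flags word QEMU keeps in
   SegmentCache. */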

void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "INTR");
            /* XXX: is it always correct ? */
            do_interrupt(vector, 0, 0, 0, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "NMI");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "EXEPT");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "SOFT");
            cpu_loop_exit();
            break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
}
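
/* VMRUN thus follows the documented sequence: host state goes to the
   hsave area, guest state is loaded from the VMCB, GIF is set, and any
   pending event in event_inj is delivered before guest execution
   resumes; helper_vmexit() below performs the inverse transition. */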

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = ECX * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
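
/* The MSR permission map allocates two bits per MSR (bit 0 = read,
   bit 1 = write) in three 2K regions covering MSRs 0-0x1fff,
   0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff.  All three cases
   above therefore use the same byte/bit split, t1 = bit offset / 8 and
   t0 = bit offset % 8, with `param` selecting the read or write bit. */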

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
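
/* The I/O permission map has one bit per port.  A multi-byte access must
   be intercepted if any covered port is, so the access size encoded in
   param bits 4-6 is expanded into a contiguous bit mask and tested with
   a 16-bit load that can safely straddle a byte boundary. */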

/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}
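
/* env->fptags holds one byte per stack register (1 = empty), so the two
   32-bit stores above update all eight tags at once: MMX instructions
   mark every register valid, EMMS marks them all empty. */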

/* XXX: suppress */
void helper_movq(uint64_t *d, uint64_t *s)
{
    *d = *s;
}

#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}
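
/* Both loops scan one bit per iteration and never terminate for
   t0 == 0; the translated BSF/BSR code is expected to test for a zero
   source and branch around the helper, setting ZF instead of calling
   it. */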

static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
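
/* cc_table drives the lazy flags evaluation: CC_OP records which
   operation last produced CC_SRC/CC_DST, and each entry pairs a function
   that materializes all of EFLAGS with a cheaper one that computes only
   CF.  The compute_* bodies for the arithmetic ops come from the
   helper_template.h expansions above. */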

CCTable cc_table[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = { /* should never happen */ },

    [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },

    [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
    [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
    [CC_OP_MULL] = { compute_all_mull, compute_c_mull },

    [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
    [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
    [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },

    [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
    [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
    [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },

    [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
    [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
    [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },

    [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
    [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
    [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },

    [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
    [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
    [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },

    [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
    [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
    [CC_OP_INCL] = { compute_all_incl, compute_c_incl },

    [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
    [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
    [CC_OP_DECL] = { compute_all_decl, compute_c_incl },

    [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
    [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
    [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },

    [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
    [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
    [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },

#ifdef TARGET_X86_64
    [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },

    [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },

    [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },

    [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },

    [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },

    [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },

    [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },

    [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },

    [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },

    [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
#endif
};