/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

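/* PF lookup table: entry i is CC_P when byte value i contains an even
   number of set bits.  The x86 parity flag only reflects the low 8 bits
   of a result, so a single 256-entry table suffices. */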
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

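/* RCL/RCR rotate through CF, so an 8-bit rotate has period 9 and a
   16-bit rotate has period 17; these tables reduce the masked 5-bit
   count modulo 17 and modulo 9 respectively. */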
/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

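/* x87 constants, in the order expected by the constant-load
   instructions FLDZ/FLD1/FLDPI/FLDLG2/FLDLN2/FLDL2E/FLDL2T. */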
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = cc_table[CC_OP].compute_all();
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

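/* Fetch the two 32-bit halves of an 8-byte segment descriptor.  Bit 2
   of the selector (the TI bit) picks the LDT over the GDT; the selector
   masked with ~7 gives the byte offset into the table. */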
/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

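/* Descriptor decoding: the 20-bit limit is split between e1[15:0] and
   e2[19:16], and with the G bit set it is scaled to 4 KiB granularity;
   the base is assembled from e1[31:16], e2[7:0] and e2[31:24]. */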
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

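/* Hardware task switch: save the outgoing context into the current TSS,
   then load the incoming one.  'source' records whether we got here via
   jmp, call or iret, which determines how the TSS busy bits and the NT
   flag are updated below. */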
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }


    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }


    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in the 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

/* check if Port I/O is allowed in TSS */
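/* The I/O permission bitmap starts at the 16-bit offset stored at TSS
   offset 0x66; there is one bit per port, and a multi-byte access may
   straddle a byte boundary, hence the two-byte read below. */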
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

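/* SET_ESP only updates the bits selected by sp_mask, so a 16-bit stack
   leaves the upper bits of ESP untouched; the x86_64 variant
   special-cases the common masks so the full 64-bit value is written
   directly when possible. */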
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}

/* protected mode interrupt */
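/* Frame pushed for a protected-mode interrupt: old SS:ESP (only on a
   stack switch), EFLAGS, CS, EIP, then the error code if any; when
   arriving from vm86 mode, GS/FS/DS/ES are pushed first. */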
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }


    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

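/* The 64-bit TSS holds RSP0-RSP2 at offset 4 + 8 * level, with the
   seven IST stack pointers laid out right after them, which is why the
   IST case in do_interrupt64() indexes this with level = ist + 3. */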
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
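/* In long mode only 64-bit interrupt and trap gates are valid.  The IST
   bits of the gate descriptor (e2 & 7) select one of seven alternate
   stacks and force a stack switch even when the privilege level does
   not change. */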
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }


    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
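/* SYSCALL: the kernel CS selector comes from STAR[47:32] (SS is that
   value + 8).  In long mode the target RIP is LSTAR for 64-bit code or
   CSTAR for compatibility mode, and RFLAGS is masked with SFMASK
   (env->fmask); in legacy mode EIP comes from STAR[31:0]. */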
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif

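/* SYSRET: the user selector base is STAR[63:48]; a 64-bit return uses
   base + 16 for CS, a 32-bit return uses the base itself, and SS is
   always base + 8.  The CPL becomes 3 in every case. */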
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real mode interrupt */
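/* Real-mode vectors are 4-byte IVT entries: a 16-bit offset followed by
   a 16-bit segment.  No error code is pushed; only FLAGS, CS and IP go
   on the stack. */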
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, "       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

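/* The SMM state save area lives at SMBASE + 0x8000.  The 64-bit build
   stores the AMD64-style save map (revision 0x00020064), the 32-bit
   build the legacy layout; bit 17 of the revision ID, checked in
   helper_rsm(), advertises SMBASE relocation. */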
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */


/* division, flags are undefined */
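/* Note that #DE (EXCP00_DIVZ) is raised both for a zero divisor and for
   a quotient that overflows the destination register, matching real
   hardware. */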

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: exception */
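/* (On real hardware, AAM with an immediate base of zero raises #DE;
   that is the missing check the XXX above refers to.) */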
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_into(int next_eip_addend)
{
    int eflags;
    eflags = cc_table[CC_OP].compute_all();
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}

void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}

#ifdef TARGET_X86_64
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = cc_table[CC_OP].compute_all();
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif

void helper_single_step(void)
{
    env->dr[6] |= 0x4000;
    raise_exception(EXCP01_SSTP);
}

1882void helper_cpuid(void)
1883{
1884 uint32_t index;
eaa728ee 1885
872929aa
FB
1886 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1887
1888 index = (uint32_t)EAX;
eaa728ee
FB
1889 /* test if maximum index reached */
1890 if (index & 0x80000000) {
1891 if (index > env->cpuid_xlevel)
1892 index = env->cpuid_level;
1893 } else {
1894 if (index > env->cpuid_level)
1895 index = env->cpuid_level;
1896 }
1897
1898 switch(index) {
1899 case 0:
1900 EAX = env->cpuid_level;
1901 EBX = env->cpuid_vendor1;
1902 EDX = env->cpuid_vendor2;
1903 ECX = env->cpuid_vendor3;
1904 break;
1905 case 1:
1906 EAX = env->cpuid_version;
1907 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1908 ECX = env->cpuid_ext_features;
1909 EDX = env->cpuid_features;
1910 break;
1911 case 2:
1912 /* cache info: needed for Pentium Pro compatibility */
1913 EAX = 1;
1914 EBX = 0;
1915 ECX = 0;
1916 EDX = 0x2c307d;
1917 break;
1918 case 0x80000000:
1919 EAX = env->cpuid_xlevel;
1920 EBX = env->cpuid_vendor1;
1921 EDX = env->cpuid_vendor2;
1922 ECX = env->cpuid_vendor3;
1923 break;
1924 case 0x80000001:
1925 EAX = env->cpuid_features;
1926 EBX = 0;
1927 ECX = env->cpuid_ext3_features;
1928 EDX = env->cpuid_ext2_features;
1929 break;
1930 case 0x80000002:
1931 case 0x80000003:
1932 case 0x80000004:
1933 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1934 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1935 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1936 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1937 break;
1938 case 0x80000005:
1939 /* cache info (L1 cache) */
1940 EAX = 0x01ff01ff;
1941 EBX = 0x01ff01ff;
1942 ECX = 0x40020140;
1943 EDX = 0x40020140;
1944 break;
1945 case 0x80000006:
1946 /* cache info (L2 cache) */
1947 EAX = 0;
1948 EBX = 0x42004200;
1949 ECX = 0x02008140;
1950 EDX = 0;
1951 break;
1952 case 0x80000008:
1953 /* virtual & phys address size in low 2 bytes. */
1954/* XXX: This value must match the one used in the MMU code. */
1955 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
1956 /* 64 bit processor */
1957#if defined(USE_KQEMU)
1958 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
1959#else
 1960/* XXX: The physical address space is limited to 42 bits in exec.c. */
1961 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
1962#endif
1963 } else {
1964#if defined(USE_KQEMU)
1965 EAX = 0x00000020; /* 32 bits physical */
 1966#else
 1967 EAX = 0x00000024; /* 36 bits physical */
 1968#endif
 1969 }
1970 EBX = 0;
1971 ECX = 0;
1972 EDX = 0;
1973 break;
1974 case 0x8000000A:
1975 EAX = 0x00000001;
1976 EBX = 0;
1977 ECX = 0;
1978 EDX = 0;
1979 break;
1980 default:
1981 /* reserved values: zero */
1982 EAX = 0;
1983 EBX = 0;
1984 ECX = 0;
1985 EDX = 0;
1986 break;
1987 }
1988}
1989
1990void helper_enter_level(int level, int data32, target_ulong t1)
1991{
1992 target_ulong ssp;
1993 uint32_t esp_mask, esp, ebp;
1994
1995 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1996 ssp = env->segs[R_SS].base;
1997 ebp = EBP;
1998 esp = ESP;
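    /* this helper is only called for ENTER with level > 0; the first ESP
       decrement accounts for the saved EBP slot handled in translated code,
       then the level-1 enclosing frame pointers are copied and the new
       frame pointer t1 is pushed */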
1999 if (data32) {
2000 /* 32 bit */
2001 esp -= 4;
2002 while (--level) {
2003 esp -= 4;
2004 ebp -= 4;
2005 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2006 }
2007 esp -= 4;
2008 stl(ssp + (esp & esp_mask), t1);
2009 } else {
2010 /* 16 bit */
2011 esp -= 2;
2012 while (--level) {
2013 esp -= 2;
2014 ebp -= 2;
2015 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2016 }
2017 esp -= 2;
2018 stw(ssp + (esp & esp_mask), t1);
2019 }
2020}
2021
2022#ifdef TARGET_X86_64
2023void helper_enter64_level(int level, int data64, target_ulong t1)
2024{
2025 target_ulong esp, ebp;
2026 ebp = EBP;
2027 esp = ESP;
2028
2029 if (data64) {
2030 /* 64 bit */
2031 esp -= 8;
2032 while (--level) {
2033 esp -= 8;
2034 ebp -= 8;
2035 stq(esp, ldq(ebp));
2036 }
2037 esp -= 8;
2038 stq(esp, t1);
2039 } else {
2040 /* 16 bit */
2041 esp -= 2;
2042 while (--level) {
2043 esp -= 2;
2044 ebp -= 2;
2045 stw(esp, lduw(ebp));
2046 }
2047 esp -= 2;
2048 stw(esp, t1);
2049 }
2050}
2051#endif
2052
2053void helper_lldt(int selector)
2054{
2055 SegmentCache *dt;
2056 uint32_t e1, e2;
2057 int index, entry_limit;
2058 target_ulong ptr;
2059
2060 selector &= 0xffff;
2061 if ((selector & 0xfffc) == 0) {
2062 /* XXX: NULL selector case: invalid LDT */
2063 env->ldt.base = 0;
2064 env->ldt.limit = 0;
2065 } else {
2066 if (selector & 0x4)
2067 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2068 dt = &env->gdt;
2069 index = selector & ~7;
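        /* LDT descriptors are 16 bytes in long mode, 8 bytes otherwise */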
2070#ifdef TARGET_X86_64
2071 if (env->hflags & HF_LMA_MASK)
2072 entry_limit = 15;
2073 else
2074#endif
2075 entry_limit = 7;
2076 if ((index + entry_limit) > dt->limit)
2077 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2078 ptr = dt->base + index;
2079 e1 = ldl_kernel(ptr);
2080 e2 = ldl_kernel(ptr + 4);
2081 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2082 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2083 if (!(e2 & DESC_P_MASK))
2084 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2085#ifdef TARGET_X86_64
2086 if (env->hflags & HF_LMA_MASK) {
2087 uint32_t e3;
2088 e3 = ldl_kernel(ptr + 8);
2089 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2090 env->ldt.base |= (target_ulong)e3 << 32;
2091 } else
2092#endif
2093 {
2094 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2095 }
2096 }
2097 env->ldt.selector = selector;
2098}
2099
2100void helper_ltr(int selector)
2101{
2102 SegmentCache *dt;
2103 uint32_t e1, e2;
2104 int index, type, entry_limit;
2105 target_ulong ptr;
2106
2107 selector &= 0xffff;
2108 if ((selector & 0xfffc) == 0) {
2109 /* NULL selector case: invalid TR */
2110 env->tr.base = 0;
2111 env->tr.limit = 0;
2112 env->tr.flags = 0;
2113 } else {
2114 if (selector & 0x4)
2115 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2116 dt = &env->gdt;
2117 index = selector & ~7;
2118#ifdef TARGET_X86_64
2119 if (env->hflags & HF_LMA_MASK)
2120 entry_limit = 15;
2121 else
2122#endif
2123 entry_limit = 7;
2124 if ((index + entry_limit) > dt->limit)
2125 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2126 ptr = dt->base + index;
2127 e1 = ldl_kernel(ptr);
2128 e2 = ldl_kernel(ptr + 4);
2129 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2130 if ((e2 & DESC_S_MASK) ||
2131 (type != 1 && type != 9))
2132 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2133 if (!(e2 & DESC_P_MASK))
2134 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2135#ifdef TARGET_X86_64
2136 if (env->hflags & HF_LMA_MASK) {
2137 uint32_t e3, e4;
2138 e3 = ldl_kernel(ptr + 8);
2139 e4 = ldl_kernel(ptr + 12);
2140 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2141 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2142 load_seg_cache_raw_dt(&env->tr, e1, e2);
2143 env->tr.base |= (target_ulong)e3 << 32;
2144 } else
2145#endif
2146 {
2147 load_seg_cache_raw_dt(&env->tr, e1, e2);
2148 }
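        /* LTR marks the loaded TSS busy in the GDT descriptor itself */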
2149 e2 |= DESC_TSS_BUSY_MASK;
2150 stl_kernel(ptr + 4, e2);
2151 }
2152 env->tr.selector = selector;
2153}
2154
2155/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2156void helper_load_seg(int seg_reg, int selector)
2157{
2158 uint32_t e1, e2;
2159 int cpl, dpl, rpl;
2160 SegmentCache *dt;
2161 int index;
2162 target_ulong ptr;
2163
2164 selector &= 0xffff;
2165 cpl = env->hflags & HF_CPL_MASK;
2166 if ((selector & 0xfffc) == 0) {
2167 /* null selector case */
2168 if (seg_reg == R_SS
2169#ifdef TARGET_X86_64
2170 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2171#endif
2172 )
2173 raise_exception_err(EXCP0D_GPF, 0);
2174 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2175 } else {
2176
2177 if (selector & 0x4)
2178 dt = &env->ldt;
2179 else
2180 dt = &env->gdt;
2181 index = selector & ~7;
2182 if ((index + 7) > dt->limit)
2183 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2184 ptr = dt->base + index;
2185 e1 = ldl_kernel(ptr);
2186 e2 = ldl_kernel(ptr + 4);
2187
2188 if (!(e2 & DESC_S_MASK))
2189 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2190 rpl = selector & 3;
2191 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2192 if (seg_reg == R_SS) {
2193 /* must be writable segment */
2194 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2195 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2196 if (rpl != cpl || dpl != cpl)
2197 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2198 } else {
2199 /* must be readable segment */
2200 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2201 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2202
2203 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2204 /* if not conforming code, test rights */
2205 if (dpl < cpl || dpl < rpl)
2206 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2207 }
2208 }
2209
2210 if (!(e2 & DESC_P_MASK)) {
2211 if (seg_reg == R_SS)
2212 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2213 else
2214 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2215 }
2216
2217 /* set the access bit if not already set */
2218 if (!(e2 & DESC_A_MASK)) {
2219 e2 |= DESC_A_MASK;
2220 stl_kernel(ptr + 4, e2);
2221 }
2222
2223 cpu_x86_load_seg_cache(env, seg_reg, selector,
2224 get_seg_base(e1, e2),
2225 get_seg_limit(e1, e2),
2226 e2);
2227#if 0
2228 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2229 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2230#endif
2231 }
2232}
2233
2234/* protected mode jump */
2235void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2236 int next_eip_addend)
2237{
2238 int gate_cs, type;
2239 uint32_t e1, e2, cpl, dpl, rpl, limit;
2240 target_ulong next_eip;
2241
2242 if ((new_cs & 0xfffc) == 0)
2243 raise_exception_err(EXCP0D_GPF, 0);
2244 if (load_segment(&e1, &e2, new_cs) != 0)
2245 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2246 cpl = env->hflags & HF_CPL_MASK;
2247 if (e2 & DESC_S_MASK) {
2248 if (!(e2 & DESC_CS_MASK))
2249 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2250 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2251 if (e2 & DESC_C_MASK) {
2252 /* conforming code segment */
2253 if (dpl > cpl)
2254 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2255 } else {
2256 /* non conforming code segment */
2257 rpl = new_cs & 3;
2258 if (rpl > cpl)
2259 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2260 if (dpl != cpl)
2261 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2262 }
2263 if (!(e2 & DESC_P_MASK))
2264 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2265 limit = get_seg_limit(e1, e2);
2266 if (new_eip > limit &&
2267 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2268 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2269 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2270 get_seg_base(e1, e2), limit, e2);
2271 EIP = new_eip;
2272 } else {
2273 /* jump to call or task gate */
2274 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2275 rpl = new_cs & 3;
2276 cpl = env->hflags & HF_CPL_MASK;
2277 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2278 switch(type) {
2279 case 1: /* 286 TSS */
2280 case 9: /* 386 TSS */
2281 case 5: /* task gate */
2282 if (dpl < cpl || dpl < rpl)
2283 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2284 next_eip = env->eip + next_eip_addend;
2285 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2286 CC_OP = CC_OP_EFLAGS;
2287 break;
2288 case 4: /* 286 call gate */
2289 case 12: /* 386 call gate */
2290 if ((dpl < cpl) || (dpl < rpl))
2291 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2292 if (!(e2 & DESC_P_MASK))
2293 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2294 gate_cs = e1 >> 16;
2295 new_eip = (e1 & 0xffff);
2296 if (type == 12)
2297 new_eip |= (e2 & 0xffff0000);
2298 if (load_segment(&e1, &e2, gate_cs) != 0)
2299 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2300 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2301 /* must be code segment */
2302 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2303 (DESC_S_MASK | DESC_CS_MASK)))
2304 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2305 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2306 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2307 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2308 if (!(e2 & DESC_P_MASK))
2309 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2310 limit = get_seg_limit(e1, e2);
2311 if (new_eip > limit)
2312 raise_exception_err(EXCP0D_GPF, 0);
2313 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2314 get_seg_base(e1, e2), limit, e2);
2315 EIP = new_eip;
2316 break;
2317 default:
2318 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2319 break;
2320 }
2321 }
2322}
2323
2324/* real mode call */
2325void helper_lcall_real(int new_cs, target_ulong new_eip1,
2326 int shift, int next_eip)
2327{
2328 int new_eip;
2329 uint32_t esp, esp_mask;
2330 target_ulong ssp;
2331
2332 new_eip = new_eip1;
2333 esp = ESP;
2334 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2335 ssp = env->segs[R_SS].base;
2336 if (shift) {
2337 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2338 PUSHL(ssp, esp, esp_mask, next_eip);
2339 } else {
2340 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2341 PUSHW(ssp, esp, esp_mask, next_eip);
2342 }
2343
2344 SET_ESP(esp, esp_mask);
2345 env->eip = new_eip;
2346 env->segs[R_CS].selector = new_cs;
2347 env->segs[R_CS].base = (new_cs << 4);
2348}
2349
2350/* protected mode call */
2351void helper_lcall_protected(int new_cs, target_ulong new_eip,
2352 int shift, int next_eip_addend)
2353{
2354 int new_stack, i;
2355 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2356 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2357 uint32_t val, limit, old_sp_mask;
2358 target_ulong ssp, old_ssp, next_eip;
2359
2360 next_eip = env->eip + next_eip_addend;
2361#ifdef DEBUG_PCALL
2362 if (loglevel & CPU_LOG_PCALL) {
2363 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2364 new_cs, (uint32_t)new_eip, shift);
2365 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2366 }
2367#endif
2368 if ((new_cs & 0xfffc) == 0)
2369 raise_exception_err(EXCP0D_GPF, 0);
2370 if (load_segment(&e1, &e2, new_cs) != 0)
2371 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2372 cpl = env->hflags & HF_CPL_MASK;
2373#ifdef DEBUG_PCALL
2374 if (loglevel & CPU_LOG_PCALL) {
2375 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2376 }
2377#endif
2378 if (e2 & DESC_S_MASK) {
2379 if (!(e2 & DESC_CS_MASK))
2380 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2381 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2382 if (e2 & DESC_C_MASK) {
2383 /* conforming code segment */
2384 if (dpl > cpl)
2385 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2386 } else {
2387 /* non conforming code segment */
2388 rpl = new_cs & 3;
2389 if (rpl > cpl)
2390 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2391 if (dpl != cpl)
2392 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2393 }
2394 if (!(e2 & DESC_P_MASK))
2395 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2396
2397#ifdef TARGET_X86_64
2398 /* XXX: check 16/32 bit cases in long mode */
2399 if (shift == 2) {
2400 target_ulong rsp;
2401 /* 64 bit case */
2402 rsp = ESP;
2403 PUSHQ(rsp, env->segs[R_CS].selector);
2404 PUSHQ(rsp, next_eip);
2405 /* from this point, not restartable */
2406 ESP = rsp;
2407 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2408 get_seg_base(e1, e2),
2409 get_seg_limit(e1, e2), e2);
2410 EIP = new_eip;
2411 } else
2412#endif
2413 {
2414 sp = ESP;
2415 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2416 ssp = env->segs[R_SS].base;
2417 if (shift) {
2418 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2419 PUSHL(ssp, sp, sp_mask, next_eip);
2420 } else {
2421 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2422 PUSHW(ssp, sp, sp_mask, next_eip);
2423 }
2424
2425 limit = get_seg_limit(e1, e2);
2426 if (new_eip > limit)
2427 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2428 /* from this point, not restartable */
2429 SET_ESP(sp, sp_mask);
2430 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2431 get_seg_base(e1, e2), limit, e2);
2432 EIP = new_eip;
2433 }
2434 } else {
2435 /* check gate type */
2436 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2437 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2438 rpl = new_cs & 3;
2439 switch(type) {
2440 case 1: /* available 286 TSS */
2441 case 9: /* available 386 TSS */
2442 case 5: /* task gate */
2443 if (dpl < cpl || dpl < rpl)
2444 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2445 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2446 CC_OP = CC_OP_EFLAGS;
2447 return;
2448 case 4: /* 286 call gate */
2449 case 12: /* 386 call gate */
2450 break;
2451 default:
2452 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2453 break;
2454 }
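        /* gate type 4 (16-bit call gate) >> 3 == 0, type 12 (32-bit) >> 3 == 1,
           so this recovers the operand size of the gate */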
2455 shift = type >> 3;
2456
2457 if (dpl < cpl || dpl < rpl)
2458 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2459 /* check valid bit */
2460 if (!(e2 & DESC_P_MASK))
2461 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2462 selector = e1 >> 16;
2463 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2464 param_count = e2 & 0x1f;
2465 if ((selector & 0xfffc) == 0)
2466 raise_exception_err(EXCP0D_GPF, 0);
2467
2468 if (load_segment(&e1, &e2, selector) != 0)
2469 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2470 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2471 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2472 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2473 if (dpl > cpl)
2474 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2475 if (!(e2 & DESC_P_MASK))
2476 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2477
2478 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2479 /* to inner privilege */
2480 get_ss_esp_from_tss(&ss, &sp, dpl);
2481#ifdef DEBUG_PCALL
2482 if (loglevel & CPU_LOG_PCALL)
2483 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2484 ss, sp, param_count, ESP);
2485#endif
2486 if ((ss & 0xfffc) == 0)
2487 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2488 if ((ss & 3) != dpl)
2489 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2490 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2491 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2492 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2493 if (ss_dpl != dpl)
2494 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2495 if (!(ss_e2 & DESC_S_MASK) ||
2496 (ss_e2 & DESC_CS_MASK) ||
2497 !(ss_e2 & DESC_W_MASK))
2498 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2499 if (!(ss_e2 & DESC_P_MASK))
2500 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2501
2502 // push_size = ((param_count * 2) + 8) << shift;
2503
2504 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2505 old_ssp = env->segs[R_SS].base;
2506
2507 sp_mask = get_sp_mask(ss_e2);
2508 ssp = get_seg_base(ss_e1, ss_e2);
2509 if (shift) {
2510 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2511 PUSHL(ssp, sp, sp_mask, ESP);
2512 for(i = param_count - 1; i >= 0; i--) {
2513 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2514 PUSHL(ssp, sp, sp_mask, val);
2515 }
2516 } else {
2517 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2518 PUSHW(ssp, sp, sp_mask, ESP);
2519 for(i = param_count - 1; i >= 0; i--) {
2520 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2521 PUSHW(ssp, sp, sp_mask, val);
2522 }
2523 }
2524 new_stack = 1;
2525 } else {
2526 /* to same privilege */
2527 sp = ESP;
2528 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2529 ssp = env->segs[R_SS].base;
2530 // push_size = (4 << shift);
2531 new_stack = 0;
2532 }
2533
2534 if (shift) {
2535 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2536 PUSHL(ssp, sp, sp_mask, next_eip);
2537 } else {
2538 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2539 PUSHW(ssp, sp, sp_mask, next_eip);
2540 }
2541
2542 /* from this point, not restartable */
2543
2544 if (new_stack) {
2545 ss = (ss & ~3) | dpl;
2546 cpu_x86_load_seg_cache(env, R_SS, ss,
2547 ssp,
2548 get_seg_limit(ss_e1, ss_e2),
2549 ss_e2);
2550 }
2551
2552 selector = (selector & ~3) | dpl;
2553 cpu_x86_load_seg_cache(env, R_CS, selector,
2554 get_seg_base(e1, e2),
2555 get_seg_limit(e1, e2),
2556 e2);
2557 cpu_x86_set_cpl(env, dpl);
2558 SET_ESP(sp, sp_mask);
2559 EIP = offset;
2560 }
2561#ifdef USE_KQEMU
2562 if (kqemu_is_ok(env)) {
2563 env->exception_index = -1;
2564 cpu_loop_exit();
2565 }
2566#endif
2567}
2568
2569/* real and vm86 mode iret */
2570void helper_iret_real(int shift)
2571{
2572 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2573 target_ulong ssp;
2574 int eflags_mask;
2575
 2576 sp_mask = 0xffff; /* XXX: use SS segment size? */
2577 sp = ESP;
2578 ssp = env->segs[R_SS].base;
2579 if (shift == 1) {
2580 /* 32 bits */
2581 POPL(ssp, sp, sp_mask, new_eip);
2582 POPL(ssp, sp, sp_mask, new_cs);
2583 new_cs &= 0xffff;
2584 POPL(ssp, sp, sp_mask, new_eflags);
2585 } else {
2586 /* 16 bits */
2587 POPW(ssp, sp, sp_mask, new_eip);
2588 POPW(ssp, sp, sp_mask, new_cs);
2589 POPW(ssp, sp, sp_mask, new_eflags);
2590 }
2591 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2592 load_seg_vm(R_CS, new_cs);
2593 env->eip = new_eip;
2594 if (env->eflags & VM_MASK)
2595 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2596 else
2597 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2598 if (shift == 0)
2599 eflags_mask &= 0xffff;
2600 load_eflags(new_eflags, eflags_mask);
 2601 env->hflags2 &= ~HF2_NMI_MASK;
2602}
2603
2604static inline void validate_seg(int seg_reg, int cpl)
2605{
2606 int dpl;
2607 uint32_t e2;
2608
2609 /* XXX: on x86_64, we do not want to nullify FS and GS because
2610 they may still contain a valid base. I would be interested to
2611 know how a real x86_64 CPU behaves */
2612 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2613 (env->segs[seg_reg].selector & 0xfffc) == 0)
2614 return;
2615
2616 e2 = env->segs[seg_reg].flags;
2617 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2618 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2619 /* data or non conforming code segment */
2620 if (dpl < cpl) {
2621 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2622 }
2623 }
2624}
2625
2626/* protected mode iret */
2627static inline void helper_ret_protected(int shift, int is_iret, int addend)
2628{
2629 uint32_t new_cs, new_eflags, new_ss;
2630 uint32_t new_es, new_ds, new_fs, new_gs;
2631 uint32_t e1, e2, ss_e1, ss_e2;
2632 int cpl, dpl, rpl, eflags_mask, iopl;
2633 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2634
2635#ifdef TARGET_X86_64
2636 if (shift == 2)
2637 sp_mask = -1;
2638 else
2639#endif
2640 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2641 sp = ESP;
2642 ssp = env->segs[R_SS].base;
2643 new_eflags = 0; /* avoid warning */
2644#ifdef TARGET_X86_64
2645 if (shift == 2) {
2646 POPQ(sp, new_eip);
2647 POPQ(sp, new_cs);
2648 new_cs &= 0xffff;
2649 if (is_iret) {
2650 POPQ(sp, new_eflags);
2651 }
2652 } else
2653#endif
2654 if (shift == 1) {
2655 /* 32 bits */
2656 POPL(ssp, sp, sp_mask, new_eip);
2657 POPL(ssp, sp, sp_mask, new_cs);
2658 new_cs &= 0xffff;
2659 if (is_iret) {
2660 POPL(ssp, sp, sp_mask, new_eflags);
2661 if (new_eflags & VM_MASK)
2662 goto return_to_vm86;
2663 }
2664 } else {
2665 /* 16 bits */
2666 POPW(ssp, sp, sp_mask, new_eip);
2667 POPW(ssp, sp, sp_mask, new_cs);
2668 if (is_iret)
2669 POPW(ssp, sp, sp_mask, new_eflags);
2670 }
2671#ifdef DEBUG_PCALL
2672 if (loglevel & CPU_LOG_PCALL) {
2673 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2674 new_cs, new_eip, shift, addend);
2675 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2676 }
2677#endif
2678 if ((new_cs & 0xfffc) == 0)
2679 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2680 if (load_segment(&e1, &e2, new_cs) != 0)
2681 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2682 if (!(e2 & DESC_S_MASK) ||
2683 !(e2 & DESC_CS_MASK))
2684 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2685 cpl = env->hflags & HF_CPL_MASK;
2686 rpl = new_cs & 3;
2687 if (rpl < cpl)
2688 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2689 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2690 if (e2 & DESC_C_MASK) {
2691 if (dpl > rpl)
2692 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2693 } else {
2694 if (dpl != rpl)
2695 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2696 }
2697 if (!(e2 & DESC_P_MASK))
2698 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2699
2700 sp += addend;
2701 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2702 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
 2703 /* return to same privilege level */
2704 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2705 get_seg_base(e1, e2),
2706 get_seg_limit(e1, e2),
2707 e2);
2708 } else {
2709 /* return to different privilege level */
2710#ifdef TARGET_X86_64
2711 if (shift == 2) {
2712 POPQ(sp, new_esp);
2713 POPQ(sp, new_ss);
2714 new_ss &= 0xffff;
2715 } else
2716#endif
2717 if (shift == 1) {
2718 /* 32 bits */
2719 POPL(ssp, sp, sp_mask, new_esp);
2720 POPL(ssp, sp, sp_mask, new_ss);
2721 new_ss &= 0xffff;
2722 } else {
2723 /* 16 bits */
2724 POPW(ssp, sp, sp_mask, new_esp);
2725 POPW(ssp, sp, sp_mask, new_ss);
2726 }
2727#ifdef DEBUG_PCALL
2728 if (loglevel & CPU_LOG_PCALL) {
2729 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2730 new_ss, new_esp);
2731 }
2732#endif
2733 if ((new_ss & 0xfffc) == 0) {
2734#ifdef TARGET_X86_64
 2735 /* NULL ss is allowed in long mode if cpl != 3 */
2736 /* XXX: test CS64 ? */
2737 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2738 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2739 0, 0xffffffff,
2740 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2741 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2742 DESC_W_MASK | DESC_A_MASK);
2743 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2744 } else
2745#endif
2746 {
2747 raise_exception_err(EXCP0D_GPF, 0);
2748 }
2749 } else {
2750 if ((new_ss & 3) != rpl)
2751 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2752 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2753 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2754 if (!(ss_e2 & DESC_S_MASK) ||
2755 (ss_e2 & DESC_CS_MASK) ||
2756 !(ss_e2 & DESC_W_MASK))
2757 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2758 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2759 if (dpl != rpl)
2760 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2761 if (!(ss_e2 & DESC_P_MASK))
2762 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2763 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2764 get_seg_base(ss_e1, ss_e2),
2765 get_seg_limit(ss_e1, ss_e2),
2766 ss_e2);
2767 }
2768
2769 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2770 get_seg_base(e1, e2),
2771 get_seg_limit(e1, e2),
2772 e2);
2773 cpu_x86_set_cpl(env, rpl);
2774 sp = new_esp;
2775#ifdef TARGET_X86_64
2776 if (env->hflags & HF_CS64_MASK)
2777 sp_mask = -1;
2778 else
2779#endif
2780 sp_mask = get_sp_mask(ss_e2);
2781
2782 /* validate data segments */
2783 validate_seg(R_ES, rpl);
2784 validate_seg(R_DS, rpl);
2785 validate_seg(R_FS, rpl);
2786 validate_seg(R_GS, rpl);
2787
2788 sp += addend;
2789 }
2790 SET_ESP(sp, sp_mask);
2791 env->eip = new_eip;
2792 if (is_iret) {
2793 /* NOTE: 'cpl' is the _old_ CPL */
2794 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2795 if (cpl == 0)
2796 eflags_mask |= IOPL_MASK;
2797 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2798 if (cpl <= iopl)
2799 eflags_mask |= IF_MASK;
2800 if (shift == 0)
2801 eflags_mask &= 0xffff;
2802 load_eflags(new_eflags, eflags_mask);
2803 }
2804 return;
2805
2806 return_to_vm86:
2807 POPL(ssp, sp, sp_mask, new_esp);
2808 POPL(ssp, sp, sp_mask, new_ss);
2809 POPL(ssp, sp, sp_mask, new_es);
2810 POPL(ssp, sp, sp_mask, new_ds);
2811 POPL(ssp, sp, sp_mask, new_fs);
2812 POPL(ssp, sp, sp_mask, new_gs);
2813
2814 /* modify processor state */
2815 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2816 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2817 load_seg_vm(R_CS, new_cs & 0xffff);
2818 cpu_x86_set_cpl(env, 3);
2819 load_seg_vm(R_SS, new_ss & 0xffff);
2820 load_seg_vm(R_ES, new_es & 0xffff);
2821 load_seg_vm(R_DS, new_ds & 0xffff);
2822 load_seg_vm(R_FS, new_fs & 0xffff);
2823 load_seg_vm(R_GS, new_gs & 0xffff);
2824
2825 env->eip = new_eip & 0xffff;
2826 ESP = new_esp;
2827}
2828
2829void helper_iret_protected(int shift, int next_eip)
2830{
2831 int tss_selector, type;
2832 uint32_t e1, e2;
2833
2834 /* specific case for TSS */
2835 if (env->eflags & NT_MASK) {
2836#ifdef TARGET_X86_64
2837 if (env->hflags & HF_LMA_MASK)
2838 raise_exception_err(EXCP0D_GPF, 0);
2839#endif
2840 tss_selector = lduw_kernel(env->tr.base + 0);
2841 if (tss_selector & 4)
2842 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2843 if (load_segment(&e1, &e2, tss_selector) != 0)
2844 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2845 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2846 /* NOTE: we check both segment and busy TSS */
2847 if (type != 3)
2848 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2849 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2850 } else {
2851 helper_ret_protected(shift, 1, 0);
2852 }
 2853 env->hflags2 &= ~HF2_NMI_MASK;
2854#ifdef USE_KQEMU
2855 if (kqemu_is_ok(env)) {
2856 CC_OP = CC_OP_EFLAGS;
2857 env->exception_index = -1;
2858 cpu_loop_exit();
2859 }
2860#endif
2861}
2862
2863void helper_lret_protected(int shift, int addend)
2864{
2865 helper_ret_protected(shift, 0, addend);
2866#ifdef USE_KQEMU
2867 if (kqemu_is_ok(env)) {
2868 env->exception_index = -1;
2869 cpu_loop_exit();
2870 }
2871#endif
2872}
2873
2874void helper_sysenter(void)
2875{
2876 if (env->sysenter_cs == 0) {
2877 raise_exception_err(EXCP0D_GPF, 0);
2878 }
2879 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2880 cpu_x86_set_cpl(env, 0);
2881 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2882 0, 0xffffffff,
2883 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2884 DESC_S_MASK |
2885 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2886 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2887 0, 0xffffffff,
2888 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2889 DESC_S_MASK |
2890 DESC_W_MASK | DESC_A_MASK);
2891 ESP = env->sysenter_esp;
2892 EIP = env->sysenter_eip;
2893}
2894
2895void helper_sysexit(void)
2896{
2897 int cpl;
2898
2899 cpl = env->hflags & HF_CPL_MASK;
2900 if (env->sysenter_cs == 0 || cpl != 0) {
2901 raise_exception_err(EXCP0D_GPF, 0);
2902 }
2903 cpu_x86_set_cpl(env, 3);
2904 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2905 0, 0xffffffff,
2906 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2907 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2908 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2909 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2910 0, 0xffffffff,
2911 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2912 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2913 DESC_W_MASK | DESC_A_MASK);
2914 ESP = ECX;
2915 EIP = EDX;
2916#ifdef USE_KQEMU
2917 if (kqemu_is_ok(env)) {
2918 env->exception_index = -1;
2919 cpu_loop_exit();
2920 }
2921#endif
2922}
2923
2924#if defined(CONFIG_USER_ONLY)
2925target_ulong helper_read_crN(int reg)
 2926{
2927 return 0;
2928}
2929
2930void helper_write_crN(int reg, target_ulong t0)
2931{
2932}
2933#else
2934target_ulong helper_read_crN(int reg)
2935{
2936 target_ulong val;
2937
2938 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2939 switch(reg) {
2940 default:
2941 val = env->cr[reg];
2942 break;
2943 case 8:
2944 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2945 val = cpu_get_apic_tpr(env);
2946 } else {
2947 val = env->v_tpr;
2948 }
2949 break;
2950 }
2951 return val;
2952}
2953
2954void helper_write_crN(int reg, target_ulong t0)
2955{
2956 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2957 switch(reg) {
2958 case 0:
2959 cpu_x86_update_cr0(env, t0);
2960 break;
2961 case 3:
2962 cpu_x86_update_cr3(env, t0);
2963 break;
2964 case 4:
2965 cpu_x86_update_cr4(env, t0);
2966 break;
2967 case 8:
2968 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2969 cpu_set_apic_tpr(env, t0);
2970 }
2971 env->v_tpr = t0 & 0x0f;
2972 break;
2973 default:
2974 env->cr[reg] = t0;
2975 break;
2976 }
 2977}
 2978#endif
2979
2980void helper_lmsw(target_ulong t0)
2981{
2982 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2983 if already set to one. */
2984 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
 2985 helper_write_crN(0, t0);
2986}
2987
2988void helper_clts(void)
2989{
2990 env->cr[0] &= ~CR0_TS_MASK;
2991 env->hflags &= ~HF_TS_MASK;
2992}
2993
2994/* XXX: do more */
2995void helper_movl_drN_T0(int reg, target_ulong t0)
2996{
2997 env->dr[reg] = t0;
2998}
2999
3000void helper_invlpg(target_ulong addr)
3001{
 3002 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
 3003 tlb_flush_page(env, addr);
3004}
3005
3006void helper_rdtsc(void)
3007{
3008 uint64_t val;
3009
3010 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3011 raise_exception(EXCP0D_GPF);
3012 }
3013 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3014
 3015 val = cpu_get_tsc(env) + env->tsc_offset;
3016 EAX = (uint32_t)(val);
3017 EDX = (uint32_t)(val >> 32);
3018}
3019
3020void helper_rdpmc(void)
3021{
3022 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3023 raise_exception(EXCP0D_GPF);
3024 }
3025 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3026
3027 /* currently unimplemented */
3028 raise_exception_err(EXCP06_ILLOP, 0);
3029}
3030
3031#if defined(CONFIG_USER_ONLY)
3032void helper_wrmsr(void)
3033{
3034}
3035
3036void helper_rdmsr(void)
3037{
3038}
3039#else
3040void helper_wrmsr(void)
3041{
3042 uint64_t val;
3043
3044 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3045
3046 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3047
3048 switch((uint32_t)ECX) {
3049 case MSR_IA32_SYSENTER_CS:
3050 env->sysenter_cs = val & 0xffff;
3051 break;
3052 case MSR_IA32_SYSENTER_ESP:
3053 env->sysenter_esp = val;
3054 break;
3055 case MSR_IA32_SYSENTER_EIP:
3056 env->sysenter_eip = val;
3057 break;
3058 case MSR_IA32_APICBASE:
3059 cpu_set_apic_base(env, val);
3060 break;
3061 case MSR_EFER:
3062 {
3063 uint64_t update_mask;
3064 update_mask = 0;
3065 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3066 update_mask |= MSR_EFER_SCE;
3067 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3068 update_mask |= MSR_EFER_LME;
3069 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3070 update_mask |= MSR_EFER_FFXSR;
3071 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3072 update_mask |= MSR_EFER_NXE;
3073 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3074 update_mask |= MSR_EFER_SVME;
3075 cpu_load_efer(env, (env->efer & ~update_mask) |
3076 (val & update_mask));
3077 }
3078 break;
3079 case MSR_STAR:
3080 env->star = val;
3081 break;
3082 case MSR_PAT:
3083 env->pat = val;
3084 break;
3085 case MSR_VM_HSAVE_PA:
3086 env->vm_hsave = val;
3087 break;
3088#ifdef TARGET_X86_64
3089 case MSR_LSTAR:
3090 env->lstar = val;
3091 break;
3092 case MSR_CSTAR:
3093 env->cstar = val;
3094 break;
3095 case MSR_FMASK:
3096 env->fmask = val;
3097 break;
3098 case MSR_FSBASE:
3099 env->segs[R_FS].base = val;
3100 break;
3101 case MSR_GSBASE:
3102 env->segs[R_GS].base = val;
3103 break;
3104 case MSR_KERNELGSBASE:
3105 env->kernelgsbase = val;
3106 break;
3107#endif
3108 default:
3109 /* XXX: exception ? */
3110 break;
3111 }
3112}
3113
3114void helper_rdmsr(void)
3115{
3116 uint64_t val;
3117
3118 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3119
3120 switch((uint32_t)ECX) {
3121 case MSR_IA32_SYSENTER_CS:
3122 val = env->sysenter_cs;
3123 break;
3124 case MSR_IA32_SYSENTER_ESP:
3125 val = env->sysenter_esp;
3126 break;
3127 case MSR_IA32_SYSENTER_EIP:
3128 val = env->sysenter_eip;
3129 break;
3130 case MSR_IA32_APICBASE:
3131 val = cpu_get_apic_base(env);
3132 break;
3133 case MSR_EFER:
3134 val = env->efer;
3135 break;
3136 case MSR_STAR:
3137 val = env->star;
3138 break;
3139 case MSR_PAT:
3140 val = env->pat;
3141 break;
3142 case MSR_VM_HSAVE_PA:
3143 val = env->vm_hsave;
3144 break;
3145#ifdef TARGET_X86_64
3146 case MSR_LSTAR:
3147 val = env->lstar;
3148 break;
3149 case MSR_CSTAR:
3150 val = env->cstar;
3151 break;
3152 case MSR_FMASK:
3153 val = env->fmask;
3154 break;
3155 case MSR_FSBASE:
3156 val = env->segs[R_FS].base;
3157 break;
3158 case MSR_GSBASE:
3159 val = env->segs[R_GS].base;
3160 break;
3161 case MSR_KERNELGSBASE:
3162 val = env->kernelgsbase;
3163 break;
3164#endif
3165#ifdef USE_KQEMU
3166 case MSR_QPI_COMMBASE:
3167 if (env->kqemu_enabled) {
3168 val = kqemu_comm_base;
3169 } else {
3170 val = 0;
3171 }
3172 break;
3173#endif
3174 default:
3175 /* XXX: exception ? */
3176 val = 0;
3177 break;
3178 }
3179 EAX = (uint32_t)(val);
3180 EDX = (uint32_t)(val >> 32);
3181}
3182#endif
3183
3184target_ulong helper_lsl(target_ulong selector1)
3185{
3186 unsigned int limit;
3187 uint32_t e1, e2, eflags, selector;
3188 int rpl, dpl, cpl, type;
3189
3190 selector = selector1 & 0xffff;
3191 eflags = cc_table[CC_OP].compute_all();
3192 if (load_segment(&e1, &e2, selector) != 0)
3193 goto fail;
3194 rpl = selector & 3;
3195 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3196 cpl = env->hflags & HF_CPL_MASK;
3197 if (e2 & DESC_S_MASK) {
3198 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3199 /* conforming */
3200 } else {
3201 if (dpl < cpl || dpl < rpl)
3202 goto fail;
3203 }
3204 } else {
3205 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
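        /* system types accepted by LSL: 1/3 = 286 TSS, 2 = LDT, 9/11 = 386 TSS */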
3206 switch(type) {
3207 case 1:
3208 case 2:
3209 case 3:
3210 case 9:
3211 case 11:
3212 break;
3213 default:
3214 goto fail;
3215 }
3216 if (dpl < cpl || dpl < rpl) {
3217 fail:
3218 CC_SRC = eflags & ~CC_Z;
3219 return 0;
3220 }
3221 }
3222 limit = get_seg_limit(e1, e2);
3223 CC_SRC = eflags | CC_Z;
3224 return limit;
3225}
3226
3227target_ulong helper_lar(target_ulong selector1)
3228{
3229 uint32_t e1, e2, eflags, selector;
3230 int rpl, dpl, cpl, type;
3231
3232 selector = selector1 & 0xffff;
3233 eflags = cc_table[CC_OP].compute_all();
3234 if ((selector & 0xfffc) == 0)
3235 goto fail;
3236 if (load_segment(&e1, &e2, selector) != 0)
3237 goto fail;
3238 rpl = selector & 3;
3239 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3240 cpl = env->hflags & HF_CPL_MASK;
3241 if (e2 & DESC_S_MASK) {
3242 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3243 /* conforming */
3244 } else {
3245 if (dpl < cpl || dpl < rpl)
3246 goto fail;
3247 }
3248 } else {
3249 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
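        /* LAR additionally accepts gates: 4/12 = call gates, 5 = task gate */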
3250 switch(type) {
3251 case 1:
3252 case 2:
3253 case 3:
3254 case 4:
3255 case 5:
3256 case 9:
3257 case 11:
3258 case 12:
3259 break;
3260 default:
3261 goto fail;
3262 }
3263 if (dpl < cpl || dpl < rpl) {
3264 fail:
3265 CC_SRC = eflags & ~CC_Z;
3266 return 0;
3267 }
3268 }
3269 CC_SRC = eflags | CC_Z;
3270 return e2 & 0x00f0ff00;
3271}
3272
3273void helper_verr(target_ulong selector1)
3274{
3275 uint32_t e1, e2, eflags, selector;
3276 int rpl, dpl, cpl;
3277
3278 selector = selector1 & 0xffff;
3279 eflags = cc_table[CC_OP].compute_all();
3280 if ((selector & 0xfffc) == 0)
3281 goto fail;
3282 if (load_segment(&e1, &e2, selector) != 0)
3283 goto fail;
3284 if (!(e2 & DESC_S_MASK))
3285 goto fail;
3286 rpl = selector & 3;
3287 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3288 cpl = env->hflags & HF_CPL_MASK;
3289 if (e2 & DESC_CS_MASK) {
3290 if (!(e2 & DESC_R_MASK))
3291 goto fail;
3292 if (!(e2 & DESC_C_MASK)) {
3293 if (dpl < cpl || dpl < rpl)
3294 goto fail;
3295 }
3296 } else {
3297 if (dpl < cpl || dpl < rpl) {
3298 fail:
3299 CC_SRC = eflags & ~CC_Z;
3300 return;
3301 }
3302 }
3303 CC_SRC = eflags | CC_Z;
3304}
3305
3306void helper_verw(target_ulong selector1)
3307{
3308 uint32_t e1, e2, eflags, selector;
3309 int rpl, dpl, cpl;
3310
3311 selector = selector1 & 0xffff;
3312 eflags = cc_table[CC_OP].compute_all();
3313 if ((selector & 0xfffc) == 0)
3314 goto fail;
3315 if (load_segment(&e1, &e2, selector) != 0)
3316 goto fail;
3317 if (!(e2 & DESC_S_MASK))
3318 goto fail;
3319 rpl = selector & 3;
3320 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3321 cpl = env->hflags & HF_CPL_MASK;
3322 if (e2 & DESC_CS_MASK) {
3323 goto fail;
3324 } else {
3325 if (dpl < cpl || dpl < rpl)
3326 goto fail;
3327 if (!(e2 & DESC_W_MASK)) {
3328 fail:
3329 CC_SRC = eflags & ~CC_Z;
3330 return;
3331 }
3332 }
3333 CC_SRC = eflags | CC_Z;
3334}
3335
3336/* x87 FPU helpers */
3337
3338static void fpu_set_exception(int mask)
3339{
3340 env->fpus |= mask;
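    /* if the new exception is unmasked in the control word, raise the
       summary and busy flags in the status word */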
3341 if (env->fpus & (~env->fpuc & FPUC_EM))
3342 env->fpus |= FPUS_SE | FPUS_B;
3343}
3344
3345static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3346{
3347 if (b == 0.0)
3348 fpu_set_exception(FPUS_ZE);
3349 return a / b;
3350}
3351
3352void fpu_raise_exception(void)
3353{
3354 if (env->cr[0] & CR0_NE_MASK) {
3355 raise_exception(EXCP10_COPR);
3356 }
3357#if !defined(CONFIG_USER_ONLY)
3358 else {
3359 cpu_set_ferr(env);
3360 }
3361#endif
3362}
3363
3364void helper_flds_FT0(uint32_t val)
3365{
3366 union {
3367 float32 f;
3368 uint32_t i;
3369 } u;
3370 u.i = val;
3371 FT0 = float32_to_floatx(u.f, &env->fp_status);
3372}
3373
3374void helper_fldl_FT0(uint64_t val)
3375{
3376 union {
3377 float64 f;
3378 uint64_t i;
3379 } u;
3380 u.i = val;
3381 FT0 = float64_to_floatx(u.f, &env->fp_status);
3382}
3383
3384void helper_fildl_FT0(int32_t val)
3385{
3386 FT0 = int32_to_floatx(val, &env->fp_status);
3387}
3388
3389void helper_flds_ST0(uint32_t val)
3390{
3391 int new_fpstt;
3392 union {
3393 float32 f;
3394 uint32_t i;
3395 } u;
3396 new_fpstt = (env->fpstt - 1) & 7;
3397 u.i = val;
3398 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3399 env->fpstt = new_fpstt;
3400 env->fptags[new_fpstt] = 0; /* validate stack entry */
3401}
3402
3403void helper_fldl_ST0(uint64_t val)
3404{
3405 int new_fpstt;
3406 union {
3407 float64 f;
3408 uint64_t i;
3409 } u;
3410 new_fpstt = (env->fpstt - 1) & 7;
3411 u.i = val;
3412 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3413 env->fpstt = new_fpstt;
3414 env->fptags[new_fpstt] = 0; /* validate stack entry */
3415}
3416
3417void helper_fildl_ST0(int32_t val)
3418{
3419 int new_fpstt;
3420 new_fpstt = (env->fpstt - 1) & 7;
3421 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3422 env->fpstt = new_fpstt;
3423 env->fptags[new_fpstt] = 0; /* validate stack entry */
3424}
3425
3426void helper_fildll_ST0(int64_t val)
3427{
3428 int new_fpstt;
3429 new_fpstt = (env->fpstt - 1) & 7;
3430 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3431 env->fpstt = new_fpstt;
3432 env->fptags[new_fpstt] = 0; /* validate stack entry */
3433}
3434
3435uint32_t helper_fsts_ST0(void)
3436{
3437 union {
3438 float32 f;
3439 uint32_t i;
3440 } u;
3441 u.f = floatx_to_float32(ST0, &env->fp_status);
3442 return u.i;
3443}
3444
3445uint64_t helper_fstl_ST0(void)
3446{
3447 union {
3448 float64 f;
3449 uint64_t i;
3450 } u;
3451 u.f = floatx_to_float64(ST0, &env->fp_status);
3452 return u.i;
3453}
3454
3455int32_t helper_fist_ST0(void)
3456{
3457 int32_t val;
3458 val = floatx_to_int32(ST0, &env->fp_status);
3459 if (val != (int16_t)val)
3460 val = -32768;
3461 return val;
3462}
3463
3464int32_t helper_fistl_ST0(void)
3465{
3466 int32_t val;
3467 val = floatx_to_int32(ST0, &env->fp_status);
3468 return val;
3469}
3470
3471int64_t helper_fistll_ST0(void)
3472{
3473 int64_t val;
3474 val = floatx_to_int64(ST0, &env->fp_status);
3475 return val;
3476}
3477
3478int32_t helper_fistt_ST0(void)
3479{
3480 int32_t val;
3481 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3482 if (val != (int16_t)val)
3483 val = -32768;
3484 return val;
3485}
3486
3487int32_t helper_fisttl_ST0(void)
3488{
3489 int32_t val;
3490 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3491 return val;
3492}
3493
3494int64_t helper_fisttll_ST0(void)
3495{
3496 int64_t val;
3497 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3498 return val;
3499}
3500
3501void helper_fldt_ST0(target_ulong ptr)
3502{
3503 int new_fpstt;
3504 new_fpstt = (env->fpstt - 1) & 7;
3505 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3506 env->fpstt = new_fpstt;
3507 env->fptags[new_fpstt] = 0; /* validate stack entry */
3508}
3509
3510void helper_fstt_ST0(target_ulong ptr)
3511{
3512 helper_fstt(ST0, ptr);
3513}
3514
3515void helper_fpush(void)
3516{
3517 fpush();
3518}
3519
3520void helper_fpop(void)
3521{
3522 fpop();
3523}
3524
3525void helper_fdecstp(void)
3526{
3527 env->fpstt = (env->fpstt - 1) & 7;
3528 env->fpus &= (~0x4700);
3529}
3530
3531void helper_fincstp(void)
3532{
3533 env->fpstt = (env->fpstt + 1) & 7;
3534 env->fpus &= (~0x4700);
3535}
3536
3537/* FPU move */
3538
3539void helper_ffree_STN(int st_index)
3540{
3541 env->fptags[(env->fpstt + st_index) & 7] = 1;
3542}
3543
3544void helper_fmov_ST0_FT0(void)
3545{
3546 ST0 = FT0;
3547}
3548
3549void helper_fmov_FT0_STN(int st_index)
3550{
3551 FT0 = ST(st_index);
3552}
3553
3554void helper_fmov_ST0_STN(int st_index)
3555{
3556 ST0 = ST(st_index);
3557}
3558
3559void helper_fmov_STN_ST0(int st_index)
3560{
3561 ST(st_index) = ST0;
3562}
3563
3564void helper_fxchg_ST0_STN(int st_index)
3565{
3566 CPU86_LDouble tmp;
3567 tmp = ST(st_index);
3568 ST(st_index) = ST0;
3569 ST0 = tmp;
3570}
3571
3572/* FPU operations */
3573
3574static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3575
3576void helper_fcom_ST0_FT0(void)
3577{
3578 int ret;
3579
3580 ret = floatx_compare(ST0, FT0, &env->fp_status);
3581 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3582 FORCE_RET();
3583}
3584
3585void helper_fucom_ST0_FT0(void)
3586{
3587 int ret;
3588
3589 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
 3590 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3591 FORCE_RET();
3592}
3593
3594static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3595
3596void helper_fcomi_ST0_FT0(void)
3597{
3598 int eflags;
3599 int ret;
3600
3601 ret = floatx_compare(ST0, FT0, &env->fp_status);
3602 eflags = cc_table[CC_OP].compute_all();
3603 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3604 CC_SRC = eflags;
3605 FORCE_RET();
3606}
3607
3608void helper_fucomi_ST0_FT0(void)
3609{
3610 int eflags;
3611 int ret;
3612
3613 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3614 eflags = cc_table[CC_OP].compute_all();
3615 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3616 CC_SRC = eflags;
3617 FORCE_RET();
3618}
3619
3620void helper_fadd_ST0_FT0(void)
3621{
3622 ST0 += FT0;
3623}
3624
3625void helper_fmul_ST0_FT0(void)
3626{
3627 ST0 *= FT0;
3628}
3629
3630void helper_fsub_ST0_FT0(void)
3631{
3632 ST0 -= FT0;
3633}
3634
3635void helper_fsubr_ST0_FT0(void)
3636{
3637 ST0 = FT0 - ST0;
3638}
3639
3640void helper_fdiv_ST0_FT0(void)
3641{
3642 ST0 = helper_fdiv(ST0, FT0);
3643}
3644
3645void helper_fdivr_ST0_FT0(void)
3646{
3647 ST0 = helper_fdiv(FT0, ST0);
3648}
3649
3650/* fp operations between STN and ST0 */
3651
3652void helper_fadd_STN_ST0(int st_index)
3653{
3654 ST(st_index) += ST0;
3655}
3656
3657void helper_fmul_STN_ST0(int st_index)
3658{
3659 ST(st_index) *= ST0;
3660}
3661
3662void helper_fsub_STN_ST0(int st_index)
3663{
3664 ST(st_index) -= ST0;
3665}
3666
3667void helper_fsubr_STN_ST0(int st_index)
3668{
3669 CPU86_LDouble *p;
3670 p = &ST(st_index);
3671 *p = ST0 - *p;
3672}
3673
3674void helper_fdiv_STN_ST0(int st_index)
3675{
3676 CPU86_LDouble *p;
3677 p = &ST(st_index);
3678 *p = helper_fdiv(*p, ST0);
3679}
3680
3681void helper_fdivr_STN_ST0(int st_index)
3682{
3683 CPU86_LDouble *p;
3684 p = &ST(st_index);
3685 *p = helper_fdiv(ST0, *p);
3686}
3687
3688/* misc FPU operations */
3689void helper_fchs_ST0(void)
3690{
3691 ST0 = floatx_chs(ST0);
3692}
3693
3694void helper_fabs_ST0(void)
3695{
3696 ST0 = floatx_abs(ST0);
3697}
3698
3699void helper_fld1_ST0(void)
3700{
3701 ST0 = f15rk[1];
3702}
3703
3704void helper_fldl2t_ST0(void)
3705{
3706 ST0 = f15rk[6];
3707}
3708
3709void helper_fldl2e_ST0(void)
3710{
3711 ST0 = f15rk[5];
3712}
3713
3714void helper_fldpi_ST0(void)
3715{
3716 ST0 = f15rk[2];
3717}
3718
3719void helper_fldlg2_ST0(void)
3720{
3721 ST0 = f15rk[3];
3722}
3723
3724void helper_fldln2_ST0(void)
3725{
3726 ST0 = f15rk[4];
3727}
3728
3729void helper_fldz_ST0(void)
3730{
3731 ST0 = f15rk[0];
3732}
3733
3734void helper_fldz_FT0(void)
3735{
3736 FT0 = f15rk[0];
3737}
3738
3739uint32_t helper_fnstsw(void)
3740{
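    /* the TOP-of-stack field occupies bits 11..13 of the FPU status word */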
3741 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3742}
3743
3744uint32_t helper_fnstcw(void)
3745{
3746 return env->fpuc;
3747}
3748
3749static void update_fp_status(void)
3750{
3751 int rnd_type;
3752
3753 /* set rounding mode */
3754 switch(env->fpuc & RC_MASK) {
3755 default:
3756 case RC_NEAR:
3757 rnd_type = float_round_nearest_even;
3758 break;
3759 case RC_DOWN:
3760 rnd_type = float_round_down;
3761 break;
3762 case RC_UP:
3763 rnd_type = float_round_up;
3764 break;
3765 case RC_CHOP:
3766 rnd_type = float_round_to_zero;
3767 break;
3768 }
3769 set_float_rounding_mode(rnd_type, &env->fp_status);
3770#ifdef FLOATX80
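    /* precision control field (FPUC bits 8..9): 0 = single, 2 = double,
       3 = extended; the reserved value 1 falls through to extended here */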
3771 switch((env->fpuc >> 8) & 3) {
3772 case 0:
3773 rnd_type = 32;
3774 break;
3775 case 2:
3776 rnd_type = 64;
3777 break;
3778 case 3:
3779 default:
3780 rnd_type = 80;
3781 break;
3782 }
3783 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3784#endif
3785}
3786
3787void helper_fldcw(uint32_t val)
3788{
3789 env->fpuc = val;
3790 update_fp_status();
3791}
3792
3793void helper_fclex(void)
3794{
3795 env->fpus &= 0x7f00;
3796}
3797
3798void helper_fwait(void)
3799{
3800 if (env->fpus & FPUS_SE)
3801 fpu_raise_exception();
3802 FORCE_RET();
3803}
3804
3805void helper_fninit(void)
3806{
3807 env->fpus = 0;
3808 env->fpstt = 0;
3809 env->fpuc = 0x37f;
3810 env->fptags[0] = 1;
3811 env->fptags[1] = 1;
3812 env->fptags[2] = 1;
3813 env->fptags[3] = 1;
3814 env->fptags[4] = 1;
3815 env->fptags[5] = 1;
3816 env->fptags[6] = 1;
3817 env->fptags[7] = 1;
3818}
3819
3820/* BCD ops */
3821
3822void helper_fbld_ST0(target_ulong ptr)
3823{
3824 CPU86_LDouble tmp;
3825 uint64_t val;
3826 unsigned int v;
3827 int i;
3828
3829 val = 0;
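    /* 18 packed-BCD digits in bytes 0..8 (two digits per byte), sign in byte 9 */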
3830 for(i = 8; i >= 0; i--) {
3831 v = ldub(ptr + i);
3832 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3833 }
3834 tmp = val;
3835 if (ldub(ptr + 9) & 0x80)
3836 tmp = -tmp;
3837 fpush();
3838 ST0 = tmp;
3839}
3840
3841void helper_fbst_ST0(target_ulong ptr)
3842{
3843 int v;
3844 target_ulong mem_ref, mem_end;
3845 int64_t val;
3846
3847 val = floatx_to_int64(ST0, &env->fp_status);
3848 mem_ref = ptr;
3849 mem_end = mem_ref + 9;
3850 if (val < 0) {
3851 stb(mem_end, 0x80);
3852 val = -val;
3853 } else {
3854 stb(mem_end, 0x00);
3855 }
3856 while (mem_ref < mem_end) {
3857 if (val == 0)
3858 break;
3859 v = val % 100;
3860 val = val / 100;
3861 v = ((v / 10) << 4) | (v % 10);
3862 stb(mem_ref++, v);
3863 }
3864 while (mem_ref < mem_end) {
3865 stb(mem_ref++, 0);
3866 }
3867}
3868
3869void helper_f2xm1(void)
3870{
3871 ST0 = pow(2.0,ST0) - 1.0;
3872}
3873
3874void helper_fyl2x(void)
3875{
3876 CPU86_LDouble fptemp;
3877
3878 fptemp = ST0;
3879 if (fptemp>0.0){
3880 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3881 ST1 *= fptemp;
3882 fpop();
3883 } else {
3884 env->fpus &= (~0x4700);
3885 env->fpus |= 0x400;
3886 }
3887}
3888
3889void helper_fptan(void)
3890{
3891 CPU86_LDouble fptemp;
3892
3893 fptemp = ST0;
3894 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3895 env->fpus |= 0x400;
3896 } else {
3897 ST0 = tan(fptemp);
3898 fpush();
3899 ST0 = 1.0;
3900 env->fpus &= (~0x400); /* C2 <-- 0 */
3901 /* the above code is for |arg| < 2**52 only */
3902 }
3903}
3904
3905void helper_fpatan(void)
3906{
3907 CPU86_LDouble fptemp, fpsrcop;
3908
3909 fpsrcop = ST1;
3910 fptemp = ST0;
3911 ST1 = atan2(fpsrcop,fptemp);
3912 fpop();
3913}
3914
3915void helper_fxtract(void)
3916{
3917 CPU86_LDoubleU temp;
3918 unsigned int expdif;
3919
3920 temp.d = ST0;
3921 expdif = EXPD(temp) - EXPBIAS;
3922 /*DP exponent bias*/
3923 ST0 = expdif;
3924 fpush();
3925 BIASEXPONENT(temp);
3926 ST0 = temp.d;
3927}
3928
3929void helper_fprem1(void)
3930{
3931 CPU86_LDouble dblq, fpsrcop, fptemp;
3932 CPU86_LDoubleU fpsrcop1, fptemp1;
3933 int expdif;
3934 signed long long int q;
3935
3936 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3937 ST0 = 0.0 / 0.0; /* NaN */
3938 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3939 return;
3940 }
3941
3942 fpsrcop = ST0;
3943 fptemp = ST1;
3944 fpsrcop1.d = fpsrcop;
3945 fptemp1.d = fptemp;
3946 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3947
3948 if (expdif < 0) {
3949 /* optimisation? taken from the AMD docs */
3950 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3951 /* ST0 is unchanged */
3952 return;
3953 }
3954
3955 if (expdif < 53) {
3956 dblq = fpsrcop / fptemp;
3957 /* round dblq towards nearest integer */
3958 dblq = rint(dblq);
3959 ST0 = fpsrcop - fptemp * dblq;
3960
3961 /* convert dblq to q by truncating towards zero */
3962 if (dblq < 0.0)
3963 q = (signed long long int)(-dblq);
3964 else
3965 q = (signed long long int)dblq;
3966
3967 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3968 /* (C0,C3,C1) <-- (q2,q1,q0) */
3969 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3970 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3971 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
3972 } else {
3973 env->fpus |= 0x400; /* C2 <-- 1 */
3974 fptemp = pow(2.0, expdif - 50);
3975 fpsrcop = (ST0 / ST1) / fptemp;
3976 /* fpsrcop = integer obtained by chopping */
3977 fpsrcop = (fpsrcop < 0.0) ?
3978 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3979 ST0 -= (ST1 * fpsrcop * fptemp);
3980 }
3981}
3982
3983void helper_fprem(void)
3984{
3985 CPU86_LDouble dblq, fpsrcop, fptemp;
3986 CPU86_LDoubleU fpsrcop1, fptemp1;
3987 int expdif;
3988 signed long long int q;
3989
3990 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3991 ST0 = 0.0 / 0.0; /* NaN */
3992 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3993 return;
3994 }
3995
3996 fpsrcop = (CPU86_LDouble)ST0;
3997 fptemp = (CPU86_LDouble)ST1;
3998 fpsrcop1.d = fpsrcop;
3999 fptemp1.d = fptemp;
4000 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4001
4002 if (expdif < 0) {
4003 /* optimisation? taken from the AMD docs */
4004 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4005 /* ST0 is unchanged */
4006 return;
4007 }
4008
4009 if ( expdif < 53 ) {
4010 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4011 /* round dblq towards zero */
4012 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4013 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4014
4015 /* convert dblq to q by truncating towards zero */
4016 if (dblq < 0.0)
4017 q = (signed long long int)(-dblq);
4018 else
4019 q = (signed long long int)dblq;
4020
4021 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4022 /* (C0,C3,C1) <-- (q2,q1,q0) */
4023 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4024 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4025 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4026 } else {
4027 int N = 32 + (expdif % 32); /* as per AMD docs */
4028 env->fpus |= 0x400; /* C2 <-- 1 */
4029 fptemp = pow(2.0, (double)(expdif - N));
4030 fpsrcop = (ST0 / ST1) / fptemp;
4031 /* fpsrcop = integer obtained by chopping */
4032 fpsrcop = (fpsrcop < 0.0) ?
4033 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4034 ST0 -= (ST1 * fpsrcop * fptemp);
4035 }
4036}
4037
4038void helper_fyl2xp1(void)
4039{
4040 CPU86_LDouble fptemp;
4041
4042 fptemp = ST0;
4043 if ((fptemp+1.0)>0.0) {
4044 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4045 ST1 *= fptemp;
4046 fpop();
4047 } else {
4048 env->fpus &= (~0x4700);
4049 env->fpus |= 0x400;
4050 }
4051}
4052
4053void helper_fsqrt(void)
4054{
4055 CPU86_LDouble fptemp;
4056
4057 fptemp = ST0;
4058 if (fptemp<0.0) {
4059 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4060 env->fpus |= 0x400;
4061 }
4062 ST0 = sqrt(fptemp);
4063}
4064
4065void helper_fsincos(void)
4066{
4067 CPU86_LDouble fptemp;
4068
4069 fptemp = ST0;
4070 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4071 env->fpus |= 0x400;
4072 } else {
4073 ST0 = sin(fptemp);
4074 fpush();
4075 ST0 = cos(fptemp);
4076 env->fpus &= (~0x400); /* C2 <-- 0 */
4077 /* the above code is for |arg| < 2**63 only */
4078 }
4079}
4080
4081void helper_frndint(void)
4082{
4083 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4084}
4085
4086void helper_fscale(void)
4087{
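    /* FSCALE: ST0 = ST0 * 2^trunc(ST1) */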
4088 ST0 = ldexp (ST0, (int)(ST1));
4089}
4090
4091void helper_fsin(void)
4092{
4093 CPU86_LDouble fptemp;
4094
4095 fptemp = ST0;
4096 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4097 env->fpus |= 0x400;
4098 } else {
4099 ST0 = sin(fptemp);
4100 env->fpus &= (~0x400); /* C2 <-- 0 */
4101 /* the above code is for |arg| < 2**53 only */
4102 }
4103}
4104
4105void helper_fcos(void)
4106{
4107 CPU86_LDouble fptemp;
4108
4109 fptemp = ST0;
4110 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4111 env->fpus |= 0x400;
4112 } else {
4113 ST0 = cos(fptemp);
4114 env->fpus &= (~0x400); /* C2 <-- 0 */
 4115 /* the above code is for |arg| < 2**63 only */
4116 }
4117}
4118
4119void helper_fxam_ST0(void)
4120{
4121 CPU86_LDoubleU temp;
4122 int expdif;
4123
4124 temp.d = ST0;
4125
4126 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4127 if (SIGND(temp))
4128 env->fpus |= 0x200; /* C1 <-- 1 */
4129
4130 /* XXX: test fptags too */
4131 expdif = EXPD(temp);
4132 if (expdif == MAXEXPD) {
4133#ifdef USE_X86LDOUBLE
4134 if (MANTD(temp) == 0x8000000000000000ULL)
4135#else
4136 if (MANTD(temp) == 0)
4137#endif
4138 env->fpus |= 0x500 /*Infinity*/;
4139 else
4140 env->fpus |= 0x100 /*NaN*/;
4141 } else if (expdif == 0) {
4142 if (MANTD(temp) == 0)
4143 env->fpus |= 0x4000 /*Zero*/;
4144 else
4145 env->fpus |= 0x4400 /*Denormal*/;
4146 } else {
4147 env->fpus |= 0x400;
4148 }
4149}
4150
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}

void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel */
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for (i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}

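/* Illustrative sketch (not in the original source) of the image that
 * helper_fxsave() fills in; the offsets match the stores above. Note
 * the abridged tag word: one valid bit per register (fptags inverted),
 * unlike the two-bits-per-register FSTENV format.
 */
#if 0
struct fxsave_image {
    uint16_t fcw;               /* 0x00 */
    uint16_t fsw;               /* 0x02: status word, TOP in bits 11..13 */
    uint16_t ftw;               /* 0x04: abridged tag word */
    uint16_t fop;               /* 0x06: not written by the code above */
    uint64_t fpu_ip;            /* 0x08: rip form, or eip+sel pair */
    uint64_t fpu_dp;            /* 0x10: rdp form, or dp+sel pair */
    uint32_t mxcsr;             /* 0x18 */
    uint32_t mxcsr_mask;        /* 0x1c */
    uint8_t  st_space[8][16];   /* 0x20: ST0..ST7, 80 bits per 16-byte slot */
    uint8_t  xmm_space[16][16]; /* 0xa0: XMM0..XMM7 (..XMM15 when CS is 64-bit) */
};
#endif
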
void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for (i = 0; i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for (i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif

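/* Illustrative sketch (not in the original source): in the double-based
 * build (!USE_X86LDOUBLE), cpu_get_fp80() widens an IEEE double to the
 * 80-bit format by shifting the 52-bit fraction into a 63-bit one and
 * making the integer bit explicit. E.g. for f = 1.0 (sign 0, biased
 * exponent 0x3ff, fraction 0):
 */
#if 0
static void fp80_example(void)
{
    uint64_t mant;
    uint16_t exp;
    cpu_get_fp80(&mant, &exp, 1.0);
    /* mant == 0x8000000000000000 (explicit integer bit),
       exp  == 0x3fff (0x3ff - EXPBIAS + 16383, with EXPBIAS == 1023) */
}
#endif
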
#ifdef TARGET_X86_64

//#define DEBUG_MULDIV

static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a)
        (*phigh)++;
    *phigh += b;
}

static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}

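/* Illustrative note (not in the original source): neg128() is plain
 * two's complement — invert both halves, then add 1 with the carry
 * propagated by add128(). A hypothetical spot check:
 */
#if 0
static void neg128_example(void)
{
    uint64_t lo = 1, hi = 0;    /* the 128-bit value 1 */
    neg128(&lo, &hi);
    /* now lo == 0xffffffffffffffff and hi == 0xffffffffffffffff,
       i.e. -1 in 128-bit two's complement */
}
#endif
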
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1;
        /* XXX: use a better algorithm */
        for (i = 0; i < 64; i++) {
            ab = a1 >> 63;
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;
        *phigh = a1;
    }
    return 0;
}

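/* Illustrative note (not in the original source): the slow path above
 * is classic shift-subtract division — 64 iterations, each shifting one
 * dividend bit into the running remainder (a1) and one quotient bit
 * into a0; the a1 >= b pre-check guarantees the quotient fits in 64
 * bits. A hypothetical spot check:
 */
#if 0
static void div64_example(void)
{
    uint64_t lo = 0, hi = 1;            /* dividend: 2^64 */
    if (!div64(&lo, &hi, 2)) {
        /* lo == 0x8000000000000000 (quotient 2^63), hi == 0 (remainder) */
    }
}
#endif
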
/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}

void helper_mulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    mulu64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = r1;
}

void helper_imulq_EAX_T0(target_ulong t0)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, EAX, t0);
    EAX = r0;
    EDX = r1;
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
}

target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    uint64_t r0, r1;

    muls64(&r0, &r1, t0, t1);
    CC_DST = r0;
    CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
    return r0;
}

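/* Illustrative note (not in the original source): the CC_SRC expression
 * in the signed multiplies is the CF/OF test — the product overflows 64
 * bits exactly when the high half is not the sign extension of the low
 * half. A hypothetical spot check:
 */
#if 0
static void imul_overflow_example(void)
{
    uint64_t r0, r1;
    muls64(&r0, &r1, 1ULL << 62, 4);    /* 2^62 * 4 == 2^64 */
    /* r0 == 0, r1 == 1: r1 != ((int64_t)r0 >> 63), so OF/CF get set */
}
#endif
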
void helper_divq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (div64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}

void helper_idivq_EAX(target_ulong t0)
{
    uint64_t r0, r1;
    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    r0 = EAX;
    r1 = EDX;
    if (idiv64(&r0, &r1, t0))
        raise_exception(EXCP00_DIVZ);
    EAX = r0;
    EDX = r1;
}
#endif

static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}

void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}

void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}

void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}

void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}

void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}

void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}

void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}

void helper_sti(void)
{
    env->eflags |= IF_MASK;
}

#if 0
/* vm86plus instructions */
void helper_cli_vm(void)
{
    env->eflags &= ~VIF_MASK;
}

void helper_sti_vm(void)
{
    env->eflags |= VIF_MASK;
    if (env->eflags & VIP_MASK) {
        raise_exception(EXCP0D_GPF);
    }
}
#endif

void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}

void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}

void helper_boundw(target_ulong a0, int v)
{
    int low, high;
    low = ldsw(a0);
    high = ldsw(a0 + 2);
    v = (int16_t)v;
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

void helper_boundl(target_ulong a0, int v)
{
    int low, high;
    low = ldl(a0);
    high = ldl(a0 + 4);
    if (v < low || v > high) {
        raise_exception(EXCP05_BOUND);
    }
    FORCE_RET();
}

static float approx_rsqrt(float a)
{
    return 1.0 / sqrt(a);
}

static float approx_rcp(float a)
{
    return 1.0 / a;
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

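/* Illustrative note (not in the original source): the VMCB stores
 * segment attributes in AMD's packed 12-bit format, while SegmentCache
 * keeps them at their native descriptor positions (bits 8..15 and
 * 20..23), so the two helpers above just shift the same fields back
 * and forth. A hypothetical spot check:
 */
#if 0
static void svm_attrib_example(void)
{
    /* a typical 32-bit code segment: access byte 0x9b, G|D nibble 0xc */
    unsigned int flags = 0x00c09b00;
    unsigned int attrib = ((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00);
    /* attrib == 0x0c9b; unpacking reverses it:
       ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12) == 0x00c09b00 */
}
#endif
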
void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "INTR");
            /* XXX: is it always correct ? */
            do_interrupt(vector, 0, 0, 0, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "NMI");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "EXEPT");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            if (loglevel & CPU_LOG_TB_IN_ASM)
                fprintf(logfile, "SOFT");
            cpu_loop_exit();
            break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
}

void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}

void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}

void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = ECX / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}

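/* Illustrative note (not in the original source): the MSR permission
 * map gives every MSR two bits (read, then write), with the three MSR
 * ranges packed back to back; param is 0 for rdmsr and 1 for wrmsr.
 * A hypothetical walk-through for ECX = 0xc0000080 (EFER):
 */
#if 0
static void msrpm_example(void)
{
    uint32_t ecx = 0xc0000080;
    uint32_t t0 = (8192 + ecx - 0xc0000000) * 2;   /* bit index 16640 */
    uint32_t t1 = t0 / 8;                          /* byte 2080 of the map */
    t0 %= 8;                                       /* bit 0 in that byte */
    /* rdmsr (param == 0) tests bit 0, wrmsr (param == 1) tests bit 1 */
}
#endif
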
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

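/* Illustrative note (not in the original source): the I/O permission
 * map has one bit per port, and an access is intercepted if any bit
 * covered by its size is set; bits 4..6 of param carry the access size
 * in bytes. A hypothetical walk-through for a 2-byte access to port
 * 0x3f9:
 */
#if 0
static void iopm_example(void)
{
    uint32_t port = 0x3f9, param = 2 << 4;         /* size 2 in bits 4..6 */
    uint16_t mask = (1 << ((param >> 4) & 7)) - 1; /* 0b11: two ports */
    /* this tests bits (port & 7) and (port & 7) + 1 in the 16-bit word
       loaded from iopm_base_pa + port / 8 */
}
#endif
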
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}

#endif

/* MMX/SSE */
/* XXX: optimize by storing fptt and fptags in the static cpu state */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}

void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}

/* XXX: suppress */
void helper_movq(uint64_t *d, uint64_t *s)
{
    *d = *s;
}

#define SHIFT 0
#include "ops_sse.h"

#define SHIFT 1
#include "ops_sse.h"

#define SHIFT 0
#include "helper_template.h"
#undef SHIFT

#define SHIFT 1
#include "helper_template.h"
#undef SHIFT

#define SHIFT 2
#include "helper_template.h"
#undef SHIFT

#ifdef TARGET_X86_64

#define SHIFT 3
#include "helper_template.h"
#undef SHIFT

#endif

/* bit operations */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}

target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}

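/* Illustrative note (not in the original source): both loops above
 * assume a non-zero operand — BSF/BSR leave the destination undefined
 * and set ZF when the source is zero, so the zero case is expected to
 * be handled before these helpers are called. A hypothetical spot
 * check:
 */
#if 0
static void bsf_bsr_example(void)
{
    /* helper_bsf(0x12) == 1  (index of the lowest set bit)
       helper_bsr(0x12) == 4  (index of the highest set bit) */
}
#endif
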
static int compute_all_eflags(void)
{
    return CC_SRC;
}

static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}

CCTable cc_table[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = { /* should never happen */ },

    [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },

    [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
    [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
    [CC_OP_MULL] = { compute_all_mull, compute_c_mull },

    [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
    [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
    [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },

    [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
    [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
    [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },

    [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
    [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
    [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },

    [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
    [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
    [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },

    [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
    [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
    [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },

    [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
    [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
    [CC_OP_INCL] = { compute_all_incl, compute_c_incl },

    [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
    [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
    [CC_OP_DECL] = { compute_all_decl, compute_c_incl },

    [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
    [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
    [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },

    [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
    [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
    [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },

#ifdef TARGET_X86_64
    [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },

    [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },

    [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },

    [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },

    [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },

    [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },

    [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },

    [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },

    [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },

    [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
#endif
};