/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "exec.h"
#include "host-utils.h"

//#define DEBUG_PCALL

#if 0
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
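
/* A minimal sketch (not part of the build) of how the table above can be
   regenerated: entry i is CC_P exactly when byte value i contains an even
   number of set bits, which is the x86 PF convention. */
#if 0
static void gen_parity_table(void)
{
    int i, v, bits;
    for(i = 0; i < 256; i++) {
        for(v = i, bits = 0; v != 0; v >>= 1)
            bits += v & 1;
        printf("%s, ", (bits & 1) ? "0" : "CC_P");
    }
}
#endif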

/* modulo 17 table */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
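
/* A sketch (not part of the build) of why these tables are modulo
   (width + 1): RCL rotates through CF, so an 8 bit rotate cycles 9 bits
   and a 16 bit rotate cycles 17 bits.  An 8 bit RCL by an arbitrary
   count could then be written as: */
#if 0
static uint8_t rcl8(uint8_t val, int count, int *cf)
{
    int eff = rclb_table[count & 0x1f];   /* count mod 9 */
    while (eff--) {
        int new_cf = (val >> 7) & 1;
        val = (val << 1) | *cf;
        *cf = new_cf;
    }
    return val;
}
#endif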

const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* thread support */

spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

/* return non-zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
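
/* A worked example (not part of the build), assuming a flat 32 bit code
   descriptor with e1 = 0x0000ffff and e2 = 0x00cf9a00: the limit field is
   0xfffff with DESC_G_MASK set, so get_seg_limit() returns 0xffffffff, and
   all three base fragments are zero, so get_seg_base() returns 0. */
#if 0
static void seg_decode_demo(void)
{
    uint32_t e1 = 0x0000ffff, e2 = 0x00cf9a00;
    printf("base=%08x limit=%08x\n",
           get_seg_base(e1, e2), get_seg_limit(e1, e2));
    /* prints: base=00000000 limit=ffffffff */
}
#endif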

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
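
/* For reference, a sketch (not part of the build) of the 32 bit TSS layout
   that the fixed offsets used by switch_tss() below correspond to: */
#if 0
struct tss32 {
    uint32_t back_link;                 /* 0x00 */
    uint32_t esp0, ss0;                 /* 0x04, 0x08 */
    uint32_t esp1, ss1;                 /* 0x0c, 0x10 */
    uint32_t esp2, ss2;                 /* 0x14, 0x18 */
    uint32_t cr3;                       /* 0x1c */
    uint32_t eip;                       /* 0x20 */
    uint32_t eflags;                    /* 0x24 */
    uint32_t eax, ecx, edx, ebx;        /* 0x28 .. 0x34 */
    uint32_t esp, ebp, esi, edi;        /* 0x38 .. 0x44 */
    uint32_t es, cs, ss, ds, fs, gs;    /* 0x48 .. 0x5c */
    uint32_t ldt;                       /* 0x60 */
    uint16_t trap;                      /* 0x64 */
    uint16_t iobase;                    /* 0x66, used by check_io() */
};
#endif
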
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

/* check if port I/O is allowed by the TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
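
/* A worked example (not part of the build): for a one byte access to port
   0x3f8, check_io() reads the bitmap word at iobase + (0x3f8 >> 3) =
   iobase + 0x7f and tests bit (0x3f8 & 7) = 0; the access is allowed only
   if that bit is clear. */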

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
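
/* A minimal usage sketch (not part of the build): a push/pop round trip on
   a stack whose SS is 16 bit (sp_mask = 0xffff), the same pattern that
   do_interrupt_protected() uses below.  SET_ESP then writes back only the
   masked bits, preserving the high half of ESP. */
#if 0
static void push_pop_demo(void)
{
    target_ulong ssp = env->segs[R_SS].base;
    uint32_t sp_mask = get_sp_mask(env->segs[R_SS].flags);
    target_ulong sp = ESP;
    uint32_t val;
    PUSHL(ssp, sp, sp_mask, 0x12345678);
    POPL(ssp, sp, sp_mask, val);   /* val == 0x12345678 */
    SET_ESP(sp, sp_mask);
}
#endif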

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }

    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
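
/* For reference (not part of the build), the 32 bit inner privilege frame
   built above, from higher to lower addresses on the new stack:
       old SS, old ESP, EFLAGS, CS, EIP, [error code]
   with old GS/FS/DS/ES pushed first when coming from vm86 mode. */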

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
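
/* A note (not part of the build) on the 64 bit TSS fields addressed by
   index = 8 * level + 4 above: RSP0..RSP2 live at 0x04/0x0c/0x14 and
   IST1..IST7 at 0x24..0x54, which is why the IST case below passes
   ist + 3 as the level. */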

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
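
/* A sketch (not part of the build) of how the STAR MSR is carved up by
   helper_syscall()/helper_sysret(): bits 47:32 give the SYSCALL CS (SS is
   that value + 8), and bits 63:48 give the SYSRET CS base.  For example,
   with STAR = 0x0023001000000000, SYSCALL loads CS = 0x10 and SS = 0x18. */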

void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exit the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, " code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
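
/* A worked example (not part of the build): if a general protection fault
   (intno 13, contributory) is raised while a page fault (EXCP0E_PAGE) is
   still being delivered, the second branch above promotes the exception to
   EXCP08_DBLE with error code 0; a further fault on top of the double
   fault then takes the cpu_abort() triple fault path. */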

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}

#endif /* !CONFIG_USER_ONLY */


#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

/* division, flags are undefined */

void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}
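
/* A worked example (not part of the build): with AX = 0x0123 and a divisor
   of 0x10, helper_divb_AL() computes q = 0x12 and r = 0x03, leaving
   AX = 0x0312 (AH = remainder, AL = quotient); a quotient above 0xff, such
   as 0x1234 / 2, raises #DE instead, just like a zero divisor. */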

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: exception */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}
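
/* A worked example (not part of the build): after AAM with the default
   base 10 and AL = 0x4f (79), helper_aam() leaves AH = 7 and AL = 9;
   AAD then reverses it, since (7 * 10 + 9) & 0xff = 0x4f. */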

void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}

void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}
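
/* A worked example (not part of the build): adding packed BCD 0x28 + 0x19
   yields binary 0x41 with AF set; helper_daa() then adds 6 to AL, giving
   0x47, the correct decimal result 47.  helper_das() performs the matching
   correction after a subtraction. */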
1859
1860 void helper_cmpxchg8b(void)
1861 {
1862 uint64_t d;
1863 int eflags;
1864
1865 eflags = cc_table[CC_OP].compute_all();
1866 d = ldq(A0);
1867 if (d == (((uint64_t)EDX << 32) | EAX)) {
1868 stq(A0, ((uint64_t)ECX << 32) | EBX);
1869 eflags |= CC_Z;
1870 } else {
1871 EDX = d >> 32;
1872 EAX = d;
1873 eflags &= ~CC_Z;
1874 }
1875 CC_SRC = eflags;
1876 }
1877
1878 void helper_single_step(void)
1879 {
1880 env->dr[6] |= 0x4000;
1881 raise_exception(EXCP01_SSTP);
1882 }
1883
1884 void helper_cpuid(void)
1885 {
1886 uint32_t index;
1887 index = (uint32_t)EAX;
1888
1889 /* test if maximum index reached */
1890 if (index & 0x80000000) {
1891 if (index > env->cpuid_xlevel)
1892 index = env->cpuid_level;
1893 } else {
1894 if (index > env->cpuid_level)
1895 index = env->cpuid_level;
1896 }
1897
1898 switch(index) {
1899 case 0:
1900 EAX = env->cpuid_level;
1901 EBX = env->cpuid_vendor1;
1902 EDX = env->cpuid_vendor2;
1903 ECX = env->cpuid_vendor3;
1904 break;
1905 case 1:
1906 EAX = env->cpuid_version;
1907 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1908 ECX = env->cpuid_ext_features;
1909 EDX = env->cpuid_features;
1910 break;
1911 case 2:
1912 /* cache info: needed for Pentium Pro compatibility */
1913 EAX = 1;
1914 EBX = 0;
1915 ECX = 0;
1916 EDX = 0x2c307d;
1917 break;
1918 case 0x80000000:
1919 EAX = env->cpuid_xlevel;
1920 EBX = env->cpuid_vendor1;
1921 EDX = env->cpuid_vendor2;
1922 ECX = env->cpuid_vendor3;
1923 break;
1924 case 0x80000001:
1925 EAX = env->cpuid_features;
1926 EBX = 0;
1927 ECX = env->cpuid_ext3_features;
1928 EDX = env->cpuid_ext2_features;
1929 break;
1930 case 0x80000002:
1931 case 0x80000003:
1932 case 0x80000004:
1933 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1934 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1935 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1936 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1937 break;
1938 case 0x80000005:
1939 /* cache info (L1 cache) */
1940 EAX = 0x01ff01ff;
1941 EBX = 0x01ff01ff;
1942 ECX = 0x40020140;
1943 EDX = 0x40020140;
1944 break;
1945 case 0x80000006:
1946 /* cache info (L2 cache) */
1947 EAX = 0;
1948 EBX = 0x42004200;
1949 ECX = 0x02008140;
1950 EDX = 0;
1951 break;
1952 case 0x80000008:
1953 /* virtual & phys address size in low 2 bytes. */
1954 /* XXX: This value must match the one used in the MMU code. */
1955 #if defined(TARGET_X86_64)
1956 # if defined(USE_KQEMU)
1957 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
1958 # else
1959 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1960 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
1961 # endif
1962 #else
1963 # if defined(USE_KQEMU)
1964 EAX = 0x00000020; /* 32 bits physical */
1965 # else
1966 EAX = 0x00000024; /* 36 bits physical */
1967 # endif
1968 #endif
1969 EBX = 0;
1970 ECX = 0;
1971 EDX = 0;
1972 break;
1973 case 0x8000000A:
1974 EAX = 0x00000001;
1975 EBX = 0;
1976 ECX = 0;
1977 EDX = 0;
1978 break;
1979 default:
1980 /* reserved values: zero */
1981 EAX = 0;
1982 EBX = 0;
1983 ECX = 0;
1984 EDX = 0;
1985 break;
1986 }
1987 }
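/* Note: leaf 0 returns the 12-byte vendor string split across
   EBX, EDX, ECX in that (non-alphabetical) order.  Illustrative
   reassembly from the three little-endian register values: */
#if 0
#include <string.h>
static void cpuid_vendor_sketch(uint32_t ebx, uint32_t edx, uint32_t ecx,
                                char vendor[13])
{
    memcpy(vendor + 0, &ebx, 4);    /* e.g. "Genu" */
    memcpy(vendor + 4, &edx, 4);    /* e.g. "ineI" */
    memcpy(vendor + 8, &ecx, 4);    /* e.g. "ntel" */
    vendor[12] = '\0';
}
#endif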
1988
1989 void helper_enter_level(int level, int data32)
1990 {
1991 target_ulong ssp;
1992 uint32_t esp_mask, esp, ebp;
1993
1994 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1995 ssp = env->segs[R_SS].base;
1996 ebp = EBP;
1997 esp = ESP;
1998 if (data32) {
1999 /* 32 bit */
2000 esp -= 4;
2001 while (--level) {
2002 esp -= 4;
2003 ebp -= 4;
2004 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2005 }
2006 esp -= 4;
2007 stl(ssp + (esp & esp_mask), T1);
2008 } else {
2009 /* 16 bit */
2010 esp -= 2;
2011 while (--level) {
2012 esp -= 2;
2013 ebp -= 2;
2014 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2015 }
2016 esp -= 2;
2017 stw(ssp + (esp & esp_mask), T1);
2018 }
2019 }
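/* Reading note (not in the original file): for ENTER with a non-zero
   nesting level, the loop above copies level - 1 saved frame pointers
   from the old frame (addressed through EBP) onto the new stack, and
   the final store pushes T1, which the generated code is expected to
   have set to the new frame-pointer value; the push of the old EBP
   itself and the final EBP/ESP updates happen in the translated code
   around this helper, which is why the first "esp -= 4" (or "-= 2")
   only skips a slot without storing. */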
2020
2021 #ifdef TARGET_X86_64
2022 void helper_enter64_level(int level, int data64)
2023 {
2024 target_ulong esp, ebp;
2025 ebp = EBP;
2026 esp = ESP;
2027
2028 if (data64) {
2029 /* 64 bit */
2030 esp -= 8;
2031 while (--level) {
2032 esp -= 8;
2033 ebp -= 8;
2034 stq(esp, ldq(ebp));
2035 }
2036 esp -= 8;
2037 stq(esp, T1);
2038 } else {
2039 /* 16 bit */
2040 esp -= 2;
2041 while (--level) {
2042 esp -= 2;
2043 ebp -= 2;
2044 stw(esp, lduw(ebp));
2045 }
2046 esp -= 2;
2047 stw(esp, T1);
2048 }
2049 }
2050 #endif
2051
2052 void helper_lldt(int selector)
2053 {
2054 SegmentCache *dt;
2055 uint32_t e1, e2;
2056 int index, entry_limit;
2057 target_ulong ptr;
2058
2059 selector &= 0xffff;
2060 if ((selector & 0xfffc) == 0) {
2061 /* XXX: NULL selector case: invalid LDT */
2062 env->ldt.base = 0;
2063 env->ldt.limit = 0;
2064 } else {
2065 if (selector & 0x4)
2066 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2067 dt = &env->gdt;
2068 index = selector & ~7;
2069 #ifdef TARGET_X86_64
2070 if (env->hflags & HF_LMA_MASK)
2071 entry_limit = 15;
2072 else
2073 #endif
2074 entry_limit = 7;
2075 if ((index + entry_limit) > dt->limit)
2076 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2077 ptr = dt->base + index;
2078 e1 = ldl_kernel(ptr);
2079 e2 = ldl_kernel(ptr + 4);
2080 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2081 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2082 if (!(e2 & DESC_P_MASK))
2083 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2084 #ifdef TARGET_X86_64
2085 if (env->hflags & HF_LMA_MASK) {
2086 uint32_t e3;
2087 e3 = ldl_kernel(ptr + 8);
2088 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2089 env->ldt.base |= (target_ulong)e3 << 32;
2090 } else
2091 #endif
2092 {
2093 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2094 }
2095 }
2096 env->ldt.selector = selector;
2097 }
2098
2099 void helper_ltr(int selector)
2100 {
2101 SegmentCache *dt;
2102 uint32_t e1, e2;
2103 int index, type, entry_limit;
2104 target_ulong ptr;
2105
2106 selector &= 0xffff;
2107 if ((selector & 0xfffc) == 0) {
2108 /* NULL selector case: invalid TR */
2109 env->tr.base = 0;
2110 env->tr.limit = 0;
2111 env->tr.flags = 0;
2112 } else {
2113 if (selector & 0x4)
2114 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2115 dt = &env->gdt;
2116 index = selector & ~7;
2117 #ifdef TARGET_X86_64
2118 if (env->hflags & HF_LMA_MASK)
2119 entry_limit = 15;
2120 else
2121 #endif
2122 entry_limit = 7;
2123 if ((index + entry_limit) > dt->limit)
2124 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2125 ptr = dt->base + index;
2126 e1 = ldl_kernel(ptr);
2127 e2 = ldl_kernel(ptr + 4);
2128 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2129 if ((e2 & DESC_S_MASK) ||
2130 (type != 1 && type != 9))
2131 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2132 if (!(e2 & DESC_P_MASK))
2133 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2134 #ifdef TARGET_X86_64
2135 if (env->hflags & HF_LMA_MASK) {
2136 uint32_t e3, e4;
2137 e3 = ldl_kernel(ptr + 8);
2138 e4 = ldl_kernel(ptr + 12);
2139 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2140 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2141 load_seg_cache_raw_dt(&env->tr, e1, e2);
2142 env->tr.base |= (target_ulong)e3 << 32;
2143 } else
2144 #endif
2145 {
2146 load_seg_cache_raw_dt(&env->tr, e1, e2);
2147 }
2148 e2 |= DESC_TSS_BUSY_MASK;
2149 stl_kernel(ptr + 4, e2);
2150 }
2151 env->tr.selector = selector;
2152 }
2153
2154 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2155 void helper_load_seg(int seg_reg, int selector)
2156 {
2157 uint32_t e1, e2;
2158 int cpl, dpl, rpl;
2159 SegmentCache *dt;
2160 int index;
2161 target_ulong ptr;
2162
2163 selector &= 0xffff;
2164 cpl = env->hflags & HF_CPL_MASK;
2165 if ((selector & 0xfffc) == 0) {
2166 /* null selector case */
2167 if (seg_reg == R_SS
2168 #ifdef TARGET_X86_64
2169 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2170 #endif
2171 )
2172 raise_exception_err(EXCP0D_GPF, 0);
2173 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2174 } else {
2175
2176 if (selector & 0x4)
2177 dt = &env->ldt;
2178 else
2179 dt = &env->gdt;
2180 index = selector & ~7;
2181 if ((index + 7) > dt->limit)
2182 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2183 ptr = dt->base + index;
2184 e1 = ldl_kernel(ptr);
2185 e2 = ldl_kernel(ptr + 4);
2186
2187 if (!(e2 & DESC_S_MASK))
2188 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2189 rpl = selector & 3;
2190 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2191 if (seg_reg == R_SS) {
2192 /* must be writable segment */
2193 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2194 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2195 if (rpl != cpl || dpl != cpl)
2196 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2197 } else {
2198 /* must be readable segment */
2199 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2200 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2201
2202 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2203 /* if not conforming code, test rights */
2204 if (dpl < cpl || dpl < rpl)
2205 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2206 }
2207 }
2208
2209 if (!(e2 & DESC_P_MASK)) {
2210 if (seg_reg == R_SS)
2211 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2212 else
2213 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2214 }
2215
2216 /* set the access bit if not already set */
2217 if (!(e2 & DESC_A_MASK)) {
2218 e2 |= DESC_A_MASK;
2219 stl_kernel(ptr + 4, e2);
2220 }
2221
2222 cpu_x86_load_seg_cache(env, seg_reg, selector,
2223 get_seg_base(e1, e2),
2224 get_seg_limit(e1, e2),
2225 e2);
2226 #if 0
2227 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2228 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2229 #endif
2230 }
2231 }
2232
2233 /* protected mode jump */
2234 void helper_ljmp_protected_T0_T1(int next_eip_addend)
2235 {
2236 int new_cs, gate_cs, type;
2237 uint32_t e1, e2, cpl, dpl, rpl, limit;
2238 target_ulong new_eip, next_eip;
2239
2240 new_cs = T0;
2241 new_eip = T1;
2242 if ((new_cs & 0xfffc) == 0)
2243 raise_exception_err(EXCP0D_GPF, 0);
2244 if (load_segment(&e1, &e2, new_cs) != 0)
2245 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2246 cpl = env->hflags & HF_CPL_MASK;
2247 if (e2 & DESC_S_MASK) {
2248 if (!(e2 & DESC_CS_MASK))
2249 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2250 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2251 if (e2 & DESC_C_MASK) {
2252 /* conforming code segment */
2253 if (dpl > cpl)
2254 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2255 } else {
2256 /* non conforming code segment */
2257 rpl = new_cs & 3;
2258 if (rpl > cpl)
2259 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2260 if (dpl != cpl)
2261 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2262 }
2263 if (!(e2 & DESC_P_MASK))
2264 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2265 limit = get_seg_limit(e1, e2);
2266 if (new_eip > limit &&
2267 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2268 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2269 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2270 get_seg_base(e1, e2), limit, e2);
2271 EIP = new_eip;
2272 } else {
2273 /* jump to call or task gate */
2274 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2275 rpl = new_cs & 3;
2276 cpl = env->hflags & HF_CPL_MASK;
2277 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2278 switch(type) {
2279 case 1: /* 286 TSS */
2280 case 9: /* 386 TSS */
2281 case 5: /* task gate */
2282 if (dpl < cpl || dpl < rpl)
2283 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2284 next_eip = env->eip + next_eip_addend;
2285 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2286 CC_OP = CC_OP_EFLAGS;
2287 break;
2288 case 4: /* 286 call gate */
2289 case 12: /* 386 call gate */
2290 if ((dpl < cpl) || (dpl < rpl))
2291 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2292 if (!(e2 & DESC_P_MASK))
2293 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2294 gate_cs = e1 >> 16;
2295 new_eip = (e1 & 0xffff);
2296 if (type == 12)
2297 new_eip |= (e2 & 0xffff0000);
2298 if (load_segment(&e1, &e2, gate_cs) != 0)
2299 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2300 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2301 /* must be code segment */
2302 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2303 (DESC_S_MASK | DESC_CS_MASK)))
2304 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2305 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2306 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2307 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2308 if (!(e2 & DESC_P_MASK))
2309 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2310 limit = get_seg_limit(e1, e2);
2311 if (new_eip > limit)
2312 raise_exception_err(EXCP0D_GPF, 0);
2313 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2314 get_seg_base(e1, e2), limit, e2);
2315 EIP = new_eip;
2316 break;
2317 default:
2318 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2319 break;
2320 }
2321 }
2322 }
2323
2324 /* real mode call */
2325 void helper_lcall_real_T0_T1(int shift, int next_eip)
2326 {
2327 int new_cs, new_eip;
2328 uint32_t esp, esp_mask;
2329 target_ulong ssp;
2330
2331 new_cs = T0;
2332 new_eip = T1;
2333 esp = ESP;
2334 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2335 ssp = env->segs[R_SS].base;
2336 if (shift) {
2337 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2338 PUSHL(ssp, esp, esp_mask, next_eip);
2339 } else {
2340 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2341 PUSHW(ssp, esp, esp_mask, next_eip);
2342 }
2343
2344 SET_ESP(esp, esp_mask);
2345 env->eip = new_eip;
2346 env->segs[R_CS].selector = new_cs;
2347 env->segs[R_CS].base = (new_cs << 4);
2348 }
2349
2350 /* protected mode call */
2351 void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
2352 {
2353 int new_cs, new_stack, i;
2354 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2355 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2356 uint32_t val, limit, old_sp_mask;
2357 target_ulong ssp, old_ssp, next_eip, new_eip;
2358
2359 new_cs = T0;
2360 new_eip = T1;
2361 next_eip = env->eip + next_eip_addend;
2362 #ifdef DEBUG_PCALL
2363 if (loglevel & CPU_LOG_PCALL) {
2364 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2365 new_cs, (uint32_t)new_eip, shift);
2366 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2367 }
2368 #endif
2369 if ((new_cs & 0xfffc) == 0)
2370 raise_exception_err(EXCP0D_GPF, 0);
2371 if (load_segment(&e1, &e2, new_cs) != 0)
2372 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2373 cpl = env->hflags & HF_CPL_MASK;
2374 #ifdef DEBUG_PCALL
2375 if (loglevel & CPU_LOG_PCALL) {
2376 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2377 }
2378 #endif
2379 if (e2 & DESC_S_MASK) {
2380 if (!(e2 & DESC_CS_MASK))
2381 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2382 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2383 if (e2 & DESC_C_MASK) {
2384 /* conforming code segment */
2385 if (dpl > cpl)
2386 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2387 } else {
2388 /* non conforming code segment */
2389 rpl = new_cs & 3;
2390 if (rpl > cpl)
2391 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2392 if (dpl != cpl)
2393 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2394 }
2395 if (!(e2 & DESC_P_MASK))
2396 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2397
2398 #ifdef TARGET_X86_64
2399 /* XXX: check 16/32 bit cases in long mode */
2400 if (shift == 2) {
2401 target_ulong rsp;
2402 /* 64 bit case */
2403 rsp = ESP;
2404 PUSHQ(rsp, env->segs[R_CS].selector);
2405 PUSHQ(rsp, next_eip);
2406 /* from this point, not restartable */
2407 ESP = rsp;
2408 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2409 get_seg_base(e1, e2),
2410 get_seg_limit(e1, e2), e2);
2411 EIP = new_eip;
2412 } else
2413 #endif
2414 {
2415 sp = ESP;
2416 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2417 ssp = env->segs[R_SS].base;
2418 if (shift) {
2419 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2420 PUSHL(ssp, sp, sp_mask, next_eip);
2421 } else {
2422 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2423 PUSHW(ssp, sp, sp_mask, next_eip);
2424 }
2425
2426 limit = get_seg_limit(e1, e2);
2427 if (new_eip > limit)
2428 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2429 /* from this point, not restartable */
2430 SET_ESP(sp, sp_mask);
2431 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2432 get_seg_base(e1, e2), limit, e2);
2433 EIP = new_eip;
2434 }
2435 } else {
2436 /* check gate type */
2437 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2438 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2439 rpl = new_cs & 3;
2440 switch(type) {
2441 case 1: /* available 286 TSS */
2442 case 9: /* available 386 TSS */
2443 case 5: /* task gate */
2444 if (dpl < cpl || dpl < rpl)
2445 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2446 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2447 CC_OP = CC_OP_EFLAGS;
2448 return;
2449 case 4: /* 286 call gate */
2450 case 12: /* 386 call gate */
2451 break;
2452 default:
2453 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2454 break;
2455 }
2456 shift = type >> 3;
2457
2458 if (dpl < cpl || dpl < rpl)
2459 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2460 /* check valid bit */
2461 if (!(e2 & DESC_P_MASK))
2462 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2463 selector = e1 >> 16;
2464 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2465 param_count = e2 & 0x1f;
2466 if ((selector & 0xfffc) == 0)
2467 raise_exception_err(EXCP0D_GPF, 0);
2468
2469 if (load_segment(&e1, &e2, selector) != 0)
2470 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2471 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2472 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2473 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2474 if (dpl > cpl)
2475 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2476 if (!(e2 & DESC_P_MASK))
2477 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2478
2479 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2480 /* to inner privilege */
2481 get_ss_esp_from_tss(&ss, &sp, dpl);
2482 #ifdef DEBUG_PCALL
2483 if (loglevel & CPU_LOG_PCALL)
2484 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2485 ss, sp, param_count, ESP);
2486 #endif
2487 if ((ss & 0xfffc) == 0)
2488 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2489 if ((ss & 3) != dpl)
2490 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2491 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2492 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2493 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2494 if (ss_dpl != dpl)
2495 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2496 if (!(ss_e2 & DESC_S_MASK) ||
2497 (ss_e2 & DESC_CS_MASK) ||
2498 !(ss_e2 & DESC_W_MASK))
2499 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2500 if (!(ss_e2 & DESC_P_MASK))
2501 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2502
2503 // push_size = ((param_count * 2) + 8) << shift;
2504
2505 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2506 old_ssp = env->segs[R_SS].base;
2507
2508 sp_mask = get_sp_mask(ss_e2);
2509 ssp = get_seg_base(ss_e1, ss_e2);
2510 if (shift) {
2511 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2512 PUSHL(ssp, sp, sp_mask, ESP);
2513 for(i = param_count - 1; i >= 0; i--) {
2514 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2515 PUSHL(ssp, sp, sp_mask, val);
2516 }
2517 } else {
2518 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2519 PUSHW(ssp, sp, sp_mask, ESP);
2520 for(i = param_count - 1; i >= 0; i--) {
2521 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2522 PUSHW(ssp, sp, sp_mask, val);
2523 }
2524 }
2525 new_stack = 1;
2526 } else {
2527 /* to same privilege */
2528 sp = ESP;
2529 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2530 ssp = env->segs[R_SS].base;
2531 // push_size = (4 << shift);
2532 new_stack = 0;
2533 }
2534
2535 if (shift) {
2536 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2537 PUSHL(ssp, sp, sp_mask, next_eip);
2538 } else {
2539 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2540 PUSHW(ssp, sp, sp_mask, next_eip);
2541 }
2542
2543 /* from this point, not restartable */
2544
2545 if (new_stack) {
2546 ss = (ss & ~3) | dpl;
2547 cpu_x86_load_seg_cache(env, R_SS, ss,
2548 ssp,
2549 get_seg_limit(ss_e1, ss_e2),
2550 ss_e2);
2551 }
2552
2553 selector = (selector & ~3) | dpl;
2554 cpu_x86_load_seg_cache(env, R_CS, selector,
2555 get_seg_base(e1, e2),
2556 get_seg_limit(e1, e2),
2557 e2);
2558 cpu_x86_set_cpl(env, dpl);
2559 SET_ESP(sp, sp_mask);
2560 EIP = offset;
2561 }
2562 #ifdef USE_KQEMU
2563 if (kqemu_is_ok(env)) {
2564 env->exception_index = -1;
2565 cpu_loop_exit();
2566 }
2567 #endif
2568 }
2569
2570 /* real and vm86 mode iret */
2571 void helper_iret_real(int shift)
2572 {
2573 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2574 target_ulong ssp;
2575 int eflags_mask;
2576
2577 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2578 sp = ESP;
2579 ssp = env->segs[R_SS].base;
2580 if (shift == 1) {
2581 /* 32 bits */
2582 POPL(ssp, sp, sp_mask, new_eip);
2583 POPL(ssp, sp, sp_mask, new_cs);
2584 new_cs &= 0xffff;
2585 POPL(ssp, sp, sp_mask, new_eflags);
2586 } else {
2587 /* 16 bits */
2588 POPW(ssp, sp, sp_mask, new_eip);
2589 POPW(ssp, sp, sp_mask, new_cs);
2590 POPW(ssp, sp, sp_mask, new_eflags);
2591 }
2592 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2593 load_seg_vm(R_CS, new_cs);
2594 env->eip = new_eip;
2595 if (env->eflags & VM_MASK)
2596 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2597 else
2598 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2599 if (shift == 0)
2600 eflags_mask &= 0xffff;
2601 load_eflags(new_eflags, eflags_mask);
2602 env->hflags &= ~HF_NMI_MASK;
2603 }
2604
2605 static inline void validate_seg(int seg_reg, int cpl)
2606 {
2607 int dpl;
2608 uint32_t e2;
2609
2610 /* XXX: on x86_64, we do not want to nullify FS and GS because
2611 they may still contain a valid base. I would be interested to
2612 know how a real x86_64 CPU behaves */
2613 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2614 (env->segs[seg_reg].selector & 0xfffc) == 0)
2615 return;
2616
2617 e2 = env->segs[seg_reg].flags;
2618 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2619 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2620 /* data or non conforming code segment */
2621 if (dpl < cpl) {
2622 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2623 }
2624 }
2625 }
2626
2627 /* protected mode iret */
2628 static inline void helper_ret_protected(int shift, int is_iret, int addend)
2629 {
2630 uint32_t new_cs, new_eflags, new_ss;
2631 uint32_t new_es, new_ds, new_fs, new_gs;
2632 uint32_t e1, e2, ss_e1, ss_e2;
2633 int cpl, dpl, rpl, eflags_mask, iopl;
2634 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2635
2636 #ifdef TARGET_X86_64
2637 if (shift == 2)
2638 sp_mask = -1;
2639 else
2640 #endif
2641 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2642 sp = ESP;
2643 ssp = env->segs[R_SS].base;
2644 new_eflags = 0; /* avoid warning */
2645 #ifdef TARGET_X86_64
2646 if (shift == 2) {
2647 POPQ(sp, new_eip);
2648 POPQ(sp, new_cs);
2649 new_cs &= 0xffff;
2650 if (is_iret) {
2651 POPQ(sp, new_eflags);
2652 }
2653 } else
2654 #endif
2655 if (shift == 1) {
2656 /* 32 bits */
2657 POPL(ssp, sp, sp_mask, new_eip);
2658 POPL(ssp, sp, sp_mask, new_cs);
2659 new_cs &= 0xffff;
2660 if (is_iret) {
2661 POPL(ssp, sp, sp_mask, new_eflags);
2662 if (new_eflags & VM_MASK)
2663 goto return_to_vm86;
2664 }
2665 } else {
2666 /* 16 bits */
2667 POPW(ssp, sp, sp_mask, new_eip);
2668 POPW(ssp, sp, sp_mask, new_cs);
2669 if (is_iret)
2670 POPW(ssp, sp, sp_mask, new_eflags);
2671 }
2672 #ifdef DEBUG_PCALL
2673 if (loglevel & CPU_LOG_PCALL) {
2674 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2675 new_cs, new_eip, shift, addend);
2676 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2677 }
2678 #endif
2679 if ((new_cs & 0xfffc) == 0)
2680 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2681 if (load_segment(&e1, &e2, new_cs) != 0)
2682 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2683 if (!(e2 & DESC_S_MASK) ||
2684 !(e2 & DESC_CS_MASK))
2685 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2686 cpl = env->hflags & HF_CPL_MASK;
2687 rpl = new_cs & 3;
2688 if (rpl < cpl)
2689 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2690 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2691 if (e2 & DESC_C_MASK) {
2692 if (dpl > rpl)
2693 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2694 } else {
2695 if (dpl != rpl)
2696 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2697 }
2698 if (!(e2 & DESC_P_MASK))
2699 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2700
2701 sp += addend;
2702 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2703 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2704 /* return to same privilege level */
2705 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2706 get_seg_base(e1, e2),
2707 get_seg_limit(e1, e2),
2708 e2);
2709 } else {
2710 /* return to different privilege level */
2711 #ifdef TARGET_X86_64
2712 if (shift == 2) {
2713 POPQ(sp, new_esp);
2714 POPQ(sp, new_ss);
2715 new_ss &= 0xffff;
2716 } else
2717 #endif
2718 if (shift == 1) {
2719 /* 32 bits */
2720 POPL(ssp, sp, sp_mask, new_esp);
2721 POPL(ssp, sp, sp_mask, new_ss);
2722 new_ss &= 0xffff;
2723 } else {
2724 /* 16 bits */
2725 POPW(ssp, sp, sp_mask, new_esp);
2726 POPW(ssp, sp, sp_mask, new_ss);
2727 }
2728 #ifdef DEBUG_PCALL
2729 if (loglevel & CPU_LOG_PCALL) {
2730 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2731 new_ss, new_esp);
2732 }
2733 #endif
2734 if ((new_ss & 0xfffc) == 0) {
2735 #ifdef TARGET_X86_64
2736 /* NULL ss is allowed in long mode if cpl != 3 */
2737 /* XXX: test CS64 ? */
2738 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2739 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2740 0, 0xffffffff,
2741 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2742 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2743 DESC_W_MASK | DESC_A_MASK);
2744 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2745 } else
2746 #endif
2747 {
2748 raise_exception_err(EXCP0D_GPF, 0);
2749 }
2750 } else {
2751 if ((new_ss & 3) != rpl)
2752 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2753 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2754 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2755 if (!(ss_e2 & DESC_S_MASK) ||
2756 (ss_e2 & DESC_CS_MASK) ||
2757 !(ss_e2 & DESC_W_MASK))
2758 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2759 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2760 if (dpl != rpl)
2761 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2762 if (!(ss_e2 & DESC_P_MASK))
2763 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2764 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2765 get_seg_base(ss_e1, ss_e2),
2766 get_seg_limit(ss_e1, ss_e2),
2767 ss_e2);
2768 }
2769
2770 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2771 get_seg_base(e1, e2),
2772 get_seg_limit(e1, e2),
2773 e2);
2774 cpu_x86_set_cpl(env, rpl);
2775 sp = new_esp;
2776 #ifdef TARGET_X86_64
2777 if (env->hflags & HF_CS64_MASK)
2778 sp_mask = -1;
2779 else
2780 #endif
2781 sp_mask = get_sp_mask(ss_e2);
2782
2783 /* validate data segments */
2784 validate_seg(R_ES, rpl);
2785 validate_seg(R_DS, rpl);
2786 validate_seg(R_FS, rpl);
2787 validate_seg(R_GS, rpl);
2788
2789 sp += addend;
2790 }
2791 SET_ESP(sp, sp_mask);
2792 env->eip = new_eip;
2793 if (is_iret) {
2794 /* NOTE: 'cpl' is the _old_ CPL */
2795 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2796 if (cpl == 0)
2797 eflags_mask |= IOPL_MASK;
2798 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2799 if (cpl <= iopl)
2800 eflags_mask |= IF_MASK;
2801 if (shift == 0)
2802 eflags_mask &= 0xffff;
2803 load_eflags(new_eflags, eflags_mask);
2804 }
2805 return;
2806
2807 return_to_vm86:
2808 POPL(ssp, sp, sp_mask, new_esp);
2809 POPL(ssp, sp, sp_mask, new_ss);
2810 POPL(ssp, sp, sp_mask, new_es);
2811 POPL(ssp, sp, sp_mask, new_ds);
2812 POPL(ssp, sp, sp_mask, new_fs);
2813 POPL(ssp, sp, sp_mask, new_gs);
2814
2815 /* modify processor state */
2816 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2817 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2818 load_seg_vm(R_CS, new_cs & 0xffff);
2819 cpu_x86_set_cpl(env, 3);
2820 load_seg_vm(R_SS, new_ss & 0xffff);
2821 load_seg_vm(R_ES, new_es & 0xffff);
2822 load_seg_vm(R_DS, new_ds & 0xffff);
2823 load_seg_vm(R_FS, new_fs & 0xffff);
2824 load_seg_vm(R_GS, new_gs & 0xffff);
2825
2826 env->eip = new_eip & 0xffff;
2827 ESP = new_esp;
2828 }
2829
2830 void helper_iret_protected(int shift, int next_eip)
2831 {
2832 int tss_selector, type;
2833 uint32_t e1, e2;
2834
2835 /* specific case for TSS */
2836 if (env->eflags & NT_MASK) {
2837 #ifdef TARGET_X86_64
2838 if (env->hflags & HF_LMA_MASK)
2839 raise_exception_err(EXCP0D_GPF, 0);
2840 #endif
2841 tss_selector = lduw_kernel(env->tr.base + 0);
2842 if (tss_selector & 4)
2843 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2844 if (load_segment(&e1, &e2, tss_selector) != 0)
2845 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2846 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2847 /* NOTE: we check both segment and busy TSS */
2848 if (type != 3)
2849 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2850 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2851 } else {
2852 helper_ret_protected(shift, 1, 0);
2853 }
2854 env->hflags &= ~HF_NMI_MASK;
2855 #ifdef USE_KQEMU
2856 if (kqemu_is_ok(env)) {
2857 CC_OP = CC_OP_EFLAGS;
2858 env->exception_index = -1;
2859 cpu_loop_exit();
2860 }
2861 #endif
2862 }
2863
2864 void helper_lret_protected(int shift, int addend)
2865 {
2866 helper_ret_protected(shift, 0, addend);
2867 #ifdef USE_KQEMU
2868 if (kqemu_is_ok(env)) {
2869 env->exception_index = -1;
2870 cpu_loop_exit();
2871 }
2872 #endif
2873 }
2874
2875 void helper_sysenter(void)
2876 {
2877 if (env->sysenter_cs == 0) {
2878 raise_exception_err(EXCP0D_GPF, 0);
2879 }
2880 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2881 cpu_x86_set_cpl(env, 0);
2882 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2883 0, 0xffffffff,
2884 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2885 DESC_S_MASK |
2886 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2887 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2888 0, 0xffffffff,
2889 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2890 DESC_S_MASK |
2891 DESC_W_MASK | DESC_A_MASK);
2892 ESP = env->sysenter_esp;
2893 EIP = env->sysenter_eip;
2894 }
2895
2896 void helper_sysexit(void)
2897 {
2898 int cpl;
2899
2900 cpl = env->hflags & HF_CPL_MASK;
2901 if (env->sysenter_cs == 0 || cpl != 0) {
2902 raise_exception_err(EXCP0D_GPF, 0);
2903 }
2904 cpu_x86_set_cpl(env, 3);
2905 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2906 0, 0xffffffff,
2907 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2908 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2909 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2910 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2911 0, 0xffffffff,
2912 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2913 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2914 DESC_W_MASK | DESC_A_MASK);
2915 ESP = ECX;
2916 EIP = EDX;
2917 #ifdef USE_KQEMU
2918 if (kqemu_is_ok(env)) {
2919 env->exception_index = -1;
2920 cpu_loop_exit();
2921 }
2922 #endif
2923 }
2924
2925 void helper_movl_crN_T0(int reg)
2926 {
2927 #if !defined(CONFIG_USER_ONLY)
2928 switch(reg) {
2929 case 0:
2930 cpu_x86_update_cr0(env, T0);
2931 break;
2932 case 3:
2933 cpu_x86_update_cr3(env, T0);
2934 break;
2935 case 4:
2936 cpu_x86_update_cr4(env, T0);
2937 break;
2938 case 8:
2939 cpu_set_apic_tpr(env, T0);
2940 env->cr[8] = T0;
2941 break;
2942 default:
2943 env->cr[reg] = T0;
2944 break;
2945 }
2946 #endif
2947 }
2948
2949 /* XXX: do more */
2950 void helper_movl_drN_T0(int reg)
2951 {
2952 env->dr[reg] = T0;
2953 }
2954
2955 void helper_invlpg(target_ulong addr)
2956 {
2957 cpu_x86_flush_tlb(env, addr);
2958 }
2959
2960 void helper_rdtsc(void)
2961 {
2962 uint64_t val;
2963
2964 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2965 raise_exception(EXCP0D_GPF);
2966 }
2967 val = cpu_get_tsc(env);
2968 EAX = (uint32_t)(val);
2969 EDX = (uint32_t)(val >> 32);
2970 }
2971
2972 void helper_rdpmc(void)
2973 {
2974 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2975 raise_exception(EXCP0D_GPF);
2976 }
2977
2978 if (!svm_check_intercept_param(SVM_EXIT_RDPMC, 0)) {
2979 /* currently unimplemented */
2980 raise_exception_err(EXCP06_ILLOP, 0);
2981 }
2982 }
2983
2984 #if defined(CONFIG_USER_ONLY)
2985 void helper_wrmsr(void)
2986 {
2987 }
2988
2989 void helper_rdmsr(void)
2990 {
2991 }
2992 #else
2993 void helper_wrmsr(void)
2994 {
2995 uint64_t val;
2996
2997 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2998
2999 switch((uint32_t)ECX) {
3000 case MSR_IA32_SYSENTER_CS:
3001 env->sysenter_cs = val & 0xffff;
3002 break;
3003 case MSR_IA32_SYSENTER_ESP:
3004 env->sysenter_esp = val;
3005 break;
3006 case MSR_IA32_SYSENTER_EIP:
3007 env->sysenter_eip = val;
3008 break;
3009 case MSR_IA32_APICBASE:
3010 cpu_set_apic_base(env, val);
3011 break;
3012 case MSR_EFER:
3013 {
3014 uint64_t update_mask;
3015 update_mask = 0;
3016 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3017 update_mask |= MSR_EFER_SCE;
3018 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3019 update_mask |= MSR_EFER_LME;
3020 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3021 update_mask |= MSR_EFER_FFXSR;
3022 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3023 update_mask |= MSR_EFER_NXE;
3024 env->efer = (env->efer & ~update_mask) |
3025 (val & update_mask);
3026 }
3027 break;
3028 case MSR_STAR:
3029 env->star = val;
3030 break;
3031 case MSR_PAT:
3032 env->pat = val;
3033 break;
3034 case MSR_VM_HSAVE_PA:
3035 env->vm_hsave = val;
3036 break;
3037 #ifdef TARGET_X86_64
3038 case MSR_LSTAR:
3039 env->lstar = val;
3040 break;
3041 case MSR_CSTAR:
3042 env->cstar = val;
3043 break;
3044 case MSR_FMASK:
3045 env->fmask = val;
3046 break;
3047 case MSR_FSBASE:
3048 env->segs[R_FS].base = val;
3049 break;
3050 case MSR_GSBASE:
3051 env->segs[R_GS].base = val;
3052 break;
3053 case MSR_KERNELGSBASE:
3054 env->kernelgsbase = val;
3055 break;
3056 #endif
3057 default:
3058 /* XXX: exception ? */
3059 break;
3060 }
3061 }
3062
3063 void helper_rdmsr(void)
3064 {
3065 uint64_t val;
3066 switch((uint32_t)ECX) {
3067 case MSR_IA32_SYSENTER_CS:
3068 val = env->sysenter_cs;
3069 break;
3070 case MSR_IA32_SYSENTER_ESP:
3071 val = env->sysenter_esp;
3072 break;
3073 case MSR_IA32_SYSENTER_EIP:
3074 val = env->sysenter_eip;
3075 break;
3076 case MSR_IA32_APICBASE:
3077 val = cpu_get_apic_base(env);
3078 break;
3079 case MSR_EFER:
3080 val = env->efer;
3081 break;
3082 case MSR_STAR:
3083 val = env->star;
3084 break;
3085 case MSR_PAT:
3086 val = env->pat;
3087 break;
3088 case MSR_VM_HSAVE_PA:
3089 val = env->vm_hsave;
3090 break;
3091 #ifdef TARGET_X86_64
3092 case MSR_LSTAR:
3093 val = env->lstar;
3094 break;
3095 case MSR_CSTAR:
3096 val = env->cstar;
3097 break;
3098 case MSR_FMASK:
3099 val = env->fmask;
3100 break;
3101 case MSR_FSBASE:
3102 val = env->segs[R_FS].base;
3103 break;
3104 case MSR_GSBASE:
3105 val = env->segs[R_GS].base;
3106 break;
3107 case MSR_KERNELGSBASE:
3108 val = env->kernelgsbase;
3109 break;
3110 #endif
3111 default:
3112 /* XXX: exception ? */
3113 val = 0;
3114 break;
3115 }
3116 EAX = (uint32_t)(val);
3117 EDX = (uint32_t)(val >> 32);
3118 }
3119 #endif
3120
3121 void helper_lsl(uint32_t selector)
3122 {
3123 unsigned int limit;
3124 uint32_t e1, e2, eflags;
3125 int rpl, dpl, cpl, type;
3126
3127 selector &= 0xffff;
3128 eflags = cc_table[CC_OP].compute_all();
3129 if (load_segment(&e1, &e2, selector) != 0)
3130 goto fail;
3131 rpl = selector & 3;
3132 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3133 cpl = env->hflags & HF_CPL_MASK;
3134 if (e2 & DESC_S_MASK) {
3135 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3136 /* conforming */
3137 } else {
3138 if (dpl < cpl || dpl < rpl)
3139 goto fail;
3140 }
3141 } else {
3142 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3143 switch(type) {
3144 case 1:
3145 case 2:
3146 case 3:
3147 case 9:
3148 case 11:
3149 break;
3150 default:
3151 goto fail;
3152 }
3153 if (dpl < cpl || dpl < rpl) {
3154 fail:
3155 CC_SRC = eflags & ~CC_Z;
3156 return;
3157 }
3158 }
3159 limit = get_seg_limit(e1, e2);
3160 T1 = limit;
3161 CC_SRC = eflags | CC_Z;
3162 }
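/* Note: the system-descriptor types accepted above are 1 (available
   286 TSS), 2 (LDT), 3 (busy 286 TSS), 9 (available 386 TSS) and
   11 (busy 386 TSS); helper_lar() below additionally accepts the gate
   types 4 (286 call gate), 5 (task gate) and 12 (386 call gate). */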
3163
3164 void helper_lar(uint32_t selector)
3165 {
3166 uint32_t e1, e2, eflags;
3167 int rpl, dpl, cpl, type;
3168
3169 selector &= 0xffff;
3170 eflags = cc_table[CC_OP].compute_all();
3171 if ((selector & 0xfffc) == 0)
3172 goto fail;
3173 if (load_segment(&e1, &e2, selector) != 0)
3174 goto fail;
3175 rpl = selector & 3;
3176 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3177 cpl = env->hflags & HF_CPL_MASK;
3178 if (e2 & DESC_S_MASK) {
3179 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3180 /* conforming */
3181 } else {
3182 if (dpl < cpl || dpl < rpl)
3183 goto fail;
3184 }
3185 } else {
3186 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3187 switch(type) {
3188 case 1:
3189 case 2:
3190 case 3:
3191 case 4:
3192 case 5:
3193 case 9:
3194 case 11:
3195 case 12:
3196 break;
3197 default:
3198 goto fail;
3199 }
3200 if (dpl < cpl || dpl < rpl) {
3201 fail:
3202 CC_SRC = eflags & ~CC_Z;
3203 return;
3204 }
3205 }
3206 T1 = e2 & 0x00f0ff00;
3207 CC_SRC = eflags | CC_Z;
3208 }
3209
3210 void helper_verr(uint32_t selector)
3211 {
3212 uint32_t e1, e2, eflags;
3213 int rpl, dpl, cpl;
3214
3215 selector &= 0xffff;
3216 eflags = cc_table[CC_OP].compute_all();
3217 if ((selector & 0xfffc) == 0)
3218 goto fail;
3219 if (load_segment(&e1, &e2, selector) != 0)
3220 goto fail;
3221 if (!(e2 & DESC_S_MASK))
3222 goto fail;
3223 rpl = selector & 3;
3224 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3225 cpl = env->hflags & HF_CPL_MASK;
3226 if (e2 & DESC_CS_MASK) {
3227 if (!(e2 & DESC_R_MASK))
3228 goto fail;
3229 if (!(e2 & DESC_C_MASK)) {
3230 if (dpl < cpl || dpl < rpl)
3231 goto fail;
3232 }
3233 } else {
3234 if (dpl < cpl || dpl < rpl) {
3235 fail:
3236 CC_SRC = eflags & ~CC_Z;
3237 return;
3238 }
3239 }
3240 CC_SRC = eflags | CC_Z;
3241 }
3242
3243 void helper_verw(uint32_t selector)
3244 {
3245 uint32_t e1, e2, eflags;
3246 int rpl, dpl, cpl;
3247
3248 selector &= 0xffff;
3249 eflags = cc_table[CC_OP].compute_all();
3250 if ((selector & 0xfffc) == 0)
3251 goto fail;
3252 if (load_segment(&e1, &e2, selector) != 0)
3253 goto fail;
3254 if (!(e2 & DESC_S_MASK))
3255 goto fail;
3256 rpl = selector & 3;
3257 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3258 cpl = env->hflags & HF_CPL_MASK;
3259 if (e2 & DESC_CS_MASK) {
3260 goto fail;
3261 } else {
3262 if (dpl < cpl || dpl < rpl)
3263 goto fail;
3264 if (!(e2 & DESC_W_MASK)) {
3265 fail:
3266 CC_SRC = eflags & ~CC_Z;
3267 return;
3268 }
3269 }
3270 CC_SRC = eflags | CC_Z;
3271 }
3272
3273 /* x87 FPU helpers */
3274
3275 static void fpu_set_exception(int mask)
3276 {
3277 env->fpus |= mask;
3278 if (env->fpus & (~env->fpuc & FPUC_EM))
3279 env->fpus |= FPUS_SE | FPUS_B;
3280 }
3281
3282 static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3283 {
3284 if (b == 0.0)
3285 fpu_set_exception(FPUS_ZE);
3286 return a / b;
3287 }
3288
3289 void fpu_raise_exception(void)
3290 {
3291 if (env->cr[0] & CR0_NE_MASK) {
3292 raise_exception(EXCP10_COPR);
3293 }
3294 #if !defined(CONFIG_USER_ONLY)
3295 else {
3296 cpu_set_ferr(env);
3297 }
3298 #endif
3299 }
3300
3301 void helper_flds_FT0(uint32_t val)
3302 {
3303 union {
3304 float32 f;
3305 uint32_t i;
3306 } u;
3307 u.i = val;
3308 FT0 = float32_to_floatx(u.f, &env->fp_status);
3309 }
3310
3311 void helper_fldl_FT0(uint64_t val)
3312 {
3313 union {
3314 float64 f;
3315 uint64_t i;
3316 } u;
3317 u.i = val;
3318 FT0 = float64_to_floatx(u.f, &env->fp_status);
3319 }
3320
3321 void helper_fildl_FT0(int32_t val)
3322 {
3323 FT0 = int32_to_floatx(val, &env->fp_status);
3324 }
3325
3326 void helper_flds_ST0(uint32_t val)
3327 {
3328 int new_fpstt;
3329 union {
3330 float32 f;
3331 uint32_t i;
3332 } u;
3333 new_fpstt = (env->fpstt - 1) & 7;
3334 u.i = val;
3335 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3336 env->fpstt = new_fpstt;
3337 env->fptags[new_fpstt] = 0; /* validate stack entry */
3338 }
3339
3340 void helper_fldl_ST0(uint64_t val)
3341 {
3342 int new_fpstt;
3343 union {
3344 float64 f;
3345 uint64_t i;
3346 } u;
3347 new_fpstt = (env->fpstt - 1) & 7;
3348 u.i = val;
3349 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3350 env->fpstt = new_fpstt;
3351 env->fptags[new_fpstt] = 0; /* validate stack entry */
3352 }
3353
3354 void helper_fildl_ST0(int32_t val)
3355 {
3356 int new_fpstt;
3357 new_fpstt = (env->fpstt - 1) & 7;
3358 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3359 env->fpstt = new_fpstt;
3360 env->fptags[new_fpstt] = 0; /* validate stack entry */
3361 }
3362
3363 void helper_fildll_ST0(int64_t val)
3364 {
3365 int new_fpstt;
3366 new_fpstt = (env->fpstt - 1) & 7;
3367 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3368 env->fpstt = new_fpstt;
3369 env->fptags[new_fpstt] = 0; /* validate stack entry */
3370 }
3371
3372 uint32_t helper_fsts_ST0(void)
3373 {
3374 union {
3375 float32 f;
3376 uint32_t i;
3377 } u;
3378 u.f = floatx_to_float32(ST0, &env->fp_status);
3379 return u.i;
3380 }
3381
3382 uint64_t helper_fstl_ST0(void)
3383 {
3384 union {
3385 float64 f;
3386 uint64_t i;
3387 } u;
3388 u.f = floatx_to_float64(ST0, &env->fp_status);
3389 return u.i;
3390 }
3391
3392 int32_t helper_fist_ST0(void)
3393 {
3394 int32_t val;
3395 val = floatx_to_int32(ST0, &env->fp_status);
3396 if (val != (int16_t)val)
3397 val = -32768;
3398 return val;
3399 }
3400
3401 int32_t helper_fistl_ST0(void)
3402 {
3403 int32_t val;
3404 val = floatx_to_int32(ST0, &env->fp_status);
3405 return val;
3406 }
3407
3408 int64_t helper_fistll_ST0(void)
3409 {
3410 int64_t val;
3411 val = floatx_to_int64(ST0, &env->fp_status);
3412 return val;
3413 }
3414
3415 int32_t helper_fistt_ST0(void)
3416 {
3417 int32_t val;
3418 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3419 if (val != (int16_t)val)
3420 val = -32768;
3421 return val;
3422 }
3423
3424 int32_t helper_fisttl_ST0(void)
3425 {
3426 int32_t val;
3427 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3428 return val;
3429 }
3430
3431 int64_t helper_fisttll_ST0(void)
3432 {
3433 int64_t val;
3434 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3435 return val;
3436 }
3437
3438 void helper_fldt_ST0(target_ulong ptr)
3439 {
3440 int new_fpstt;
3441 new_fpstt = (env->fpstt - 1) & 7;
3442 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3443 env->fpstt = new_fpstt;
3444 env->fptags[new_fpstt] = 0; /* validate stack entry */
3445 }
3446
3447 void helper_fstt_ST0(target_ulong ptr)
3448 {
3449 helper_fstt(ST0, ptr);
3450 }
3451
3452 void helper_fpush(void)
3453 {
3454 fpush();
3455 }
3456
3457 void helper_fpop(void)
3458 {
3459 fpop();
3460 }
3461
3462 void helper_fdecstp(void)
3463 {
3464 env->fpstt = (env->fpstt - 1) & 7;
3465 env->fpus &= (~0x4700);
3466 }
3467
3468 void helper_fincstp(void)
3469 {
3470 env->fpstt = (env->fpstt + 1) & 7;
3471 env->fpus &= (~0x4700);
3472 }
3473
3474 /* FPU move */
3475
3476 void helper_ffree_STN(int st_index)
3477 {
3478 env->fptags[(env->fpstt + st_index) & 7] = 1;
3479 }
3480
3481 void helper_fmov_ST0_FT0(void)
3482 {
3483 ST0 = FT0;
3484 }
3485
3486 void helper_fmov_FT0_STN(int st_index)
3487 {
3488 FT0 = ST(st_index);
3489 }
3490
3491 void helper_fmov_ST0_STN(int st_index)
3492 {
3493 ST0 = ST(st_index);
3494 }
3495
3496 void helper_fmov_STN_ST0(int st_index)
3497 {
3498 ST(st_index) = ST0;
3499 }
3500
3501 void helper_fxchg_ST0_STN(int st_index)
3502 {
3503 CPU86_LDouble tmp;
3504 tmp = ST(st_index);
3505 ST(st_index) = ST0;
3506 ST0 = tmp;
3507 }
3508
3509 /* FPU operations */
3510
3511 static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3512
3513 void helper_fcom_ST0_FT0(void)
3514 {
3515 int ret;
3516
3517 ret = floatx_compare(ST0, FT0, &env->fp_status);
3518 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3519 FORCE_RET();
3520 }
3521
3522 void helper_fucom_ST0_FT0(void)
3523 {
3524 int ret;
3525
3526 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3527 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3528 FORCE_RET();
3529 }
3530
3531 static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3532
3533 void helper_fcomi_ST0_FT0(void)
3534 {
3535 int eflags;
3536 int ret;
3537
3538 ret = floatx_compare(ST0, FT0, &env->fp_status);
3539 eflags = cc_table[CC_OP].compute_all();
3540 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3541 CC_SRC = eflags;
3542 FORCE_RET();
3543 }
3544
3545 void helper_fucomi_ST0_FT0(void)
3546 {
3547 int eflags;
3548 int ret;
3549
3550 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3551 eflags = cc_table[CC_OP].compute_all();
3552 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3553 CC_SRC = eflags;
3554 FORCE_RET();
3555 }
3556
3557 void helper_fadd_ST0_FT0(void)
3558 {
3559 ST0 += FT0;
3560 }
3561
3562 void helper_fmul_ST0_FT0(void)
3563 {
3564 ST0 *= FT0;
3565 }
3566
3567 void helper_fsub_ST0_FT0(void)
3568 {
3569 ST0 -= FT0;
3570 }
3571
3572 void helper_fsubr_ST0_FT0(void)
3573 {
3574 ST0 = FT0 - ST0;
3575 }
3576
3577 void helper_fdiv_ST0_FT0(void)
3578 {
3579 ST0 = helper_fdiv(ST0, FT0);
3580 }
3581
3582 void helper_fdivr_ST0_FT0(void)
3583 {
3584 ST0 = helper_fdiv(FT0, ST0);
3585 }
3586
3587 /* fp operations between STN and ST0 */
3588
3589 void helper_fadd_STN_ST0(int st_index)
3590 {
3591 ST(st_index) += ST0;
3592 }
3593
3594 void helper_fmul_STN_ST0(int st_index)
3595 {
3596 ST(st_index) *= ST0;
3597 }
3598
3599 void helper_fsub_STN_ST0(int st_index)
3600 {
3601 ST(st_index) -= ST0;
3602 }
3603
3604 void helper_fsubr_STN_ST0(int st_index)
3605 {
3606 CPU86_LDouble *p;
3607 p = &ST(st_index);
3608 *p = ST0 - *p;
3609 }
3610
3611 void helper_fdiv_STN_ST0(int st_index)
3612 {
3613 CPU86_LDouble *p;
3614 p = &ST(st_index);
3615 *p = helper_fdiv(*p, ST0);
3616 }
3617
3618 void helper_fdivr_STN_ST0(int st_index)
3619 {
3620 CPU86_LDouble *p;
3621 p = &ST(st_index);
3622 *p = helper_fdiv(ST0, *p);
3623 }
3624
3625 /* misc FPU operations */
3626 void helper_fchs_ST0(void)
3627 {
3628 ST0 = floatx_chs(ST0);
3629 }
3630
3631 void helper_fabs_ST0(void)
3632 {
3633 ST0 = floatx_abs(ST0);
3634 }
3635
3636 void helper_fld1_ST0(void)
3637 {
3638 ST0 = f15rk[1];
3639 }
3640
3641 void helper_fldl2t_ST0(void)
3642 {
3643 ST0 = f15rk[6];
3644 }
3645
3646 void helper_fldl2e_ST0(void)
3647 {
3648 ST0 = f15rk[5];
3649 }
3650
3651 void helper_fldpi_ST0(void)
3652 {
3653 ST0 = f15rk[2];
3654 }
3655
3656 void helper_fldlg2_ST0(void)
3657 {
3658 ST0 = f15rk[3];
3659 }
3660
3661 void helper_fldln2_ST0(void)
3662 {
3663 ST0 = f15rk[4];
3664 }
3665
3666 void helper_fldz_ST0(void)
3667 {
3668 ST0 = f15rk[0];
3669 }
3670
3671 void helper_fldz_FT0(void)
3672 {
3673 FT0 = f15rk[0];
3674 }
3675
3676 uint32_t helper_fnstsw(void)
3677 {
3678 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3679 }
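/* Note: QEMU keeps TOP (the x87 stack-top index) in env->fpstt rather
   than in bits 13:11 of env->fpus, so FNSTSW merges the two as above.
   Illustrative recovery of TOP from a packed status word: */
#if 0
static int fpus_top_sketch(uint16_t fpus)
{
    return (fpus >> 11) & 7;    /* TOP from bits 13:11 */
}
#endif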
3680
3681 uint32_t helper_fnstcw(void)
3682 {
3683 return env->fpuc;
3684 }
3685
3686 static void update_fp_status(void)
3687 {
3688 int rnd_type;
3689
3690 /* set rounding mode */
3691 switch(env->fpuc & RC_MASK) {
3692 default:
3693 case RC_NEAR:
3694 rnd_type = float_round_nearest_even;
3695 break;
3696 case RC_DOWN:
3697 rnd_type = float_round_down;
3698 break;
3699 case RC_UP:
3700 rnd_type = float_round_up;
3701 break;
3702 case RC_CHOP:
3703 rnd_type = float_round_to_zero;
3704 break;
3705 }
3706 set_float_rounding_mode(rnd_type, &env->fp_status);
3707 #ifdef FLOATX80
3708 switch((env->fpuc >> 8) & 3) {
3709 case 0:
3710 rnd_type = 32;
3711 break;
3712 case 2:
3713 rnd_type = 64;
3714 break;
3715 case 3:
3716 default:
3717 rnd_type = 80;
3718 break;
3719 }
3720 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3721 #endif
3722 }
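/* Note: update_fp_status() decodes the x87 control word: the
   rounding-control field sits in bits 11:10 and, when FLOATX80 is
   available, the precision-control field in bits 9:8 (0 = single,
   2 = double, 3 = extended; 1 is reserved).  Illustrative decode: */
#if 0
static void fpuc_fields_sketch(uint16_t fpuc, int *rc, int *pc)
{
    *rc = (fpuc >> 10) & 3;     /* 0 nearest, 1 down, 2 up, 3 chop */
    *pc = (fpuc >> 8) & 3;      /* operand precision, as above */
}
#endif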
3723
3724 void helper_fldcw(uint32_t val)
3725 {
3726 env->fpuc = val;
3727 update_fp_status();
3728 }
3729
3730 void helper_fclex(void)
3731 {
3732 env->fpus &= 0x7f00;
3733 }
3734
3735 void helper_fwait(void)
3736 {
3737 if (env->fpus & FPUS_SE)
3738 fpu_raise_exception();
3739 FORCE_RET();
3740 }
3741
3742 void helper_fninit(void)
3743 {
3744 env->fpus = 0;
3745 env->fpstt = 0;
3746 env->fpuc = 0x37f;
3747 env->fptags[0] = 1;
3748 env->fptags[1] = 1;
3749 env->fptags[2] = 1;
3750 env->fptags[3] = 1;
3751 env->fptags[4] = 1;
3752 env->fptags[5] = 1;
3753 env->fptags[6] = 1;
3754 env->fptags[7] = 1;
3755 }
3756
3757 /* BCD ops */
3758
3759 void helper_fbld_ST0(target_ulong ptr)
3760 {
3761 CPU86_LDouble tmp;
3762 uint64_t val;
3763 unsigned int v;
3764 int i;
3765
3766 val = 0;
3767 for(i = 8; i >= 0; i--) {
3768 v = ldub(ptr + i);
3769 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3770 }
3771 tmp = val;
3772 if (ldub(ptr + 9) & 0x80)
3773 tmp = -tmp;
3774 fpush();
3775 ST0 = tmp;
3776 }
3777
3778 void helper_fbst_ST0(target_ulong ptr)
3779 {
3780 int v;
3781 target_ulong mem_ref, mem_end;
3782 int64_t val;
3783
3784 val = floatx_to_int64(ST0, &env->fp_status);
3785 mem_ref = ptr;
3786 mem_end = mem_ref + 9;
3787 if (val < 0) {
3788 stb(mem_end, 0x80);
3789 val = -val;
3790 } else {
3791 stb(mem_end, 0x00);
3792 }
3793 while (mem_ref < mem_end) {
3794 if (val == 0)
3795 break;
3796 v = val % 100;
3797 val = val / 100;
3798 v = ((v / 10) << 4) | (v % 10);
3799 stb(mem_ref++, v);
3800 }
3801 while (mem_ref < mem_end) {
3802 stb(mem_ref++, 0);
3803 }
3804 }
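/* Note: the FBLD/FBST operand handled above is 10-byte packed BCD:
   bytes 0..8 hold 18 decimal digits, two per byte with the low digit
   in the low nibble, and bit 7 of byte 9 carries the sign.
   Illustrative round trip for a single byte: */
#if 0
static uint8_t bcd_pack_sketch(int two_digits)      /* 0..99 */
{
    return ((two_digits / 10) << 4) | (two_digits % 10);
}

static int bcd_unpack_sketch(uint8_t b)
{
    return ((b >> 4) * 10) + (b & 0xf);
}
#endif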
3805
3806 void helper_f2xm1(void)
3807 {
3808 ST0 = pow(2.0,ST0) - 1.0;
3809 }
3810
3811 void helper_fyl2x(void)
3812 {
3813 CPU86_LDouble fptemp;
3814
3815 fptemp = ST0;
3816 if (fptemp > 0.0) {
3817 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3818 ST1 *= fptemp;
3819 fpop();
3820 } else {
3821 env->fpus &= (~0x4700);
3822 env->fpus |= 0x400;
3823 }
3824 }
3825
3826 void helper_fptan(void)
3827 {
3828 CPU86_LDouble fptemp;
3829
3830 fptemp = ST0;
3831 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3832 env->fpus |= 0x400;
3833 } else {
3834 ST0 = tan(fptemp);
3835 fpush();
3836 ST0 = 1.0;
3837 env->fpus &= (~0x400); /* C2 <-- 0 */
3838 /* the above code is for |arg| < 2**52 only */
3839 }
3840 }
3841
3842 void helper_fpatan(void)
3843 {
3844 CPU86_LDouble fptemp, fpsrcop;
3845
3846 fpsrcop = ST1;
3847 fptemp = ST0;
3848 ST1 = atan2(fpsrcop,fptemp);
3849 fpop();
3850 }
3851
3852 void helper_fxtract(void)
3853 {
3854 CPU86_LDoubleU temp;
3855 unsigned int expdif;
3856
3857 temp.d = ST0;
3858 expdif = EXPD(temp) - EXPBIAS;
3859 /*DP exponent bias*/
3860 ST0 = expdif;
3861 fpush();
3862 BIASEXPONENT(temp);
3863 ST0 = temp.d;
3864 }
3865
3866 void helper_fprem1(void)
3867 {
3868 CPU86_LDouble dblq, fpsrcop, fptemp;
3869 CPU86_LDoubleU fpsrcop1, fptemp1;
3870 int expdif;
3871 signed long long int q;
3872
3873 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3874 ST0 = 0.0 / 0.0; /* NaN */
3875 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3876 return;
3877 }
3878
3879 fpsrcop = ST0;
3880 fptemp = ST1;
3881 fpsrcop1.d = fpsrcop;
3882 fptemp1.d = fptemp;
3883 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3884
3885 if (expdif < 0) {
3886 /* optimisation? taken from the AMD docs */
3887 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3888 /* ST0 is unchanged */
3889 return;
3890 }
3891
3892 if (expdif < 53) {
3893 dblq = fpsrcop / fptemp;
3894 /* round dblq towards nearest integer */
3895 dblq = rint(dblq);
3896 ST0 = fpsrcop - fptemp * dblq;
3897
3898 /* convert dblq to q by truncating towards zero */
3899 if (dblq < 0.0)
3900 q = (signed long long int)(-dblq);
3901 else
3902 q = (signed long long int)dblq;
3903
3904 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3905 /* (C0,C3,C1) <-- (q2,q1,q0) */
3906 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3907 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3908 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
3909 } else {
3910 env->fpus |= 0x400; /* C2 <-- 1 */
3911 fptemp = pow(2.0, expdif - 50);
3912 fpsrcop = (ST0 / ST1) / fptemp;
3913 /* fpsrcop = integer obtained by chopping */
3914 fpsrcop = (fpsrcop < 0.0) ?
3915 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3916 ST0 -= (ST1 * fpsrcop * fptemp);
3917 }
3918 }
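/* Note: when the reduction completes (expdif < 53), the three low bits
   of the integer quotient q are reported as (C0,C3,C1) = (q2,q1,q0),
   which is exactly what the shifts above encode: C0 is status bit 8,
   C3 bit 14, C1 bit 9.  Illustrative packing of the same bits: */
#if 0
static uint16_t fprem_q_bits_sketch(unsigned q)
{
    return ((q & 0x4) << (8 - 2))       /* q2 -> C0 (bit 8)  */
         | ((q & 0x2) << (14 - 1))      /* q1 -> C3 (bit 14) */
         | ((q & 0x1) << (9 - 0));      /* q0 -> C1 (bit 9)  */
}
#endif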
3919
3920 void helper_fprem(void)
3921 {
3922 CPU86_LDouble dblq, fpsrcop, fptemp;
3923 CPU86_LDoubleU fpsrcop1, fptemp1;
3924 int expdif;
3925 signed long long int q;
3926
3927 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3928 ST0 = 0.0 / 0.0; /* NaN */
3929 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3930 return;
3931 }
3932
3933 fpsrcop = (CPU86_LDouble)ST0;
3934 fptemp = (CPU86_LDouble)ST1;
3935 fpsrcop1.d = fpsrcop;
3936 fptemp1.d = fptemp;
3937 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3938
3939 if (expdif < 0) {
3940 /* optimisation? taken from the AMD docs */
3941 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3942 /* ST0 is unchanged */
3943 return;
3944 }
3945
3946 if ( expdif < 53 ) {
3947 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
3948 /* round dblq towards zero */
3949 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
3950 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
3951
3952 /* convert dblq to q by truncating towards zero */
3953 if (dblq < 0.0)
3954 q = (signed long long int)(-dblq);
3955 else
3956 q = (signed long long int)dblq;
3957
3958 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3959 /* (C0,C3,C1) <-- (q2,q1,q0) */
3960 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3961 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3962 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
3963 } else {
3964 int N = 32 + (expdif % 32); /* as per AMD docs */
3965 env->fpus |= 0x400; /* C2 <-- 1 */
3966 fptemp = pow(2.0, (double)(expdif - N));
3967 fpsrcop = (ST0 / ST1) / fptemp;
3968 /* fpsrcop = integer obtained by chopping */
3969 fpsrcop = (fpsrcop < 0.0) ?
3970 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3971 ST0 -= (ST1 * fpsrcop * fptemp);
3972 }
3973 }
3974
3975 void helper_fyl2xp1(void)
3976 {
3977 CPU86_LDouble fptemp;
3978
3979 fptemp = ST0;
3980 if ((fptemp+1.0)>0.0) {
3981 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
3982 ST1 *= fptemp;
3983 fpop();
3984 } else {
3985 env->fpus &= (~0x4700);
3986 env->fpus |= 0x400;
3987 }
3988 }
3989
3990 void helper_fsqrt(void)
3991 {
3992 CPU86_LDouble fptemp;
3993
3994 fptemp = ST0;
3995 if (fptemp<0.0) {
3996 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3997 env->fpus |= 0x400;
3998 }
3999 ST0 = sqrt(fptemp);
4000 }
4001
4002 void helper_fsincos(void)
4003 {
4004 CPU86_LDouble fptemp;
4005
4006 fptemp = ST0;
4007 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4008 env->fpus |= 0x400;
4009 } else {
4010 ST0 = sin(fptemp);
4011 fpush();
4012 ST0 = cos(fptemp);
4013 env->fpus &= (~0x400); /* C2 <-- 0 */
4014 /* the above code is for |arg| < 2**63 only */
4015 }
4016 }
4017
4018 void helper_frndint(void)
4019 {
4020 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4021 }
4022
4023 void helper_fscale(void)
4024 {
4025 ST0 = ldexp (ST0, (int)(ST1));
4026 }
4027
4028 void helper_fsin(void)
4029 {
4030 CPU86_LDouble fptemp;
4031
4032 fptemp = ST0;
4033 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4034 env->fpus |= 0x400;
4035 } else {
4036 ST0 = sin(fptemp);
4037 env->fpus &= (~0x400); /* C2 <-- 0 */
4038 /* the above code is for |arg| < 2**53 only */
4039 }
4040 }
4041
4042 void helper_fcos(void)
4043 {
4044 CPU86_LDouble fptemp;
4045
4046 fptemp = ST0;
4047 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4048 env->fpus |= 0x400;
4049 } else {
4050 ST0 = cos(fptemp);
4051 env->fpus &= (~0x400); /* C2 <-- 0 */
4052 /* the above code is for |arg| < 2**63 only */
4053 }
4054 }
4055
4056 void helper_fxam_ST0(void)
4057 {
4058 CPU86_LDoubleU temp;
4059 int expdif;
4060
4061 temp.d = ST0;
4062
4063 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4064 if (SIGND(temp))
4065 env->fpus |= 0x200; /* C1 <-- 1 */
4066
4067 /* XXX: test fptags too */
4068 expdif = EXPD(temp);
4069 if (expdif == MAXEXPD) {
4070 #ifdef USE_X86LDOUBLE
4071 if (MANTD(temp) == 0x8000000000000000ULL)
4072 #else
4073 if (MANTD(temp) == 0)
4074 #endif
4075 env->fpus |= 0x500 /*Infinity*/;
4076 else
4077 env->fpus |= 0x100 /*NaN*/;
4078 } else if (expdif == 0) {
4079 if (MANTD(temp) == 0)
4080 env->fpus |= 0x4000 /*Zero*/;
4081 else
4082 env->fpus |= 0x4400 /*Denormal*/;
4083 } else {
4084 env->fpus |= 0x400;
4085 }
4086 }
4087
4088 void helper_fstenv(target_ulong ptr, int data32)
4089 {
4090 int fpus, fptag, exp, i;
4091 uint64_t mant;
4092 CPU86_LDoubleU tmp;
4093
4094 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4095 fptag = 0;
4096 for (i=7; i>=0; i--) {
4097 fptag <<= 2;
4098 if (env->fptags[i]) {
4099 fptag |= 3;
4100 } else {
4101 tmp.d = env->fpregs[i].d;
4102 exp = EXPD(tmp);
4103 mant = MANTD(tmp);
4104 if (exp == 0 && mant == 0) {
4105 /* zero */
4106 fptag |= 1;
4107 } else if (exp == 0 || exp == MAXEXPD
4108 #ifdef USE_X86LDOUBLE
4109 || (mant & (1LL << 63)) == 0
4110 #endif
4111 ) {
4112 /* NaNs, infinity, denormal */
4113 fptag |= 2;
4114 }
4115 }
4116 }
4117 if (data32) {
4118 /* 32 bit */
4119 stl(ptr, env->fpuc);
4120 stl(ptr + 4, fpus);
4121 stl(ptr + 8, fptag);
4122 stl(ptr + 12, 0); /* fpip */
4123 stl(ptr + 16, 0); /* fpcs */
4124 stl(ptr + 20, 0); /* fpoo */
4125 stl(ptr + 24, 0); /* fpos */
4126 } else {
4127 /* 16 bit */
4128 stw(ptr, env->fpuc);
4129 stw(ptr + 2, fpus);
4130 stw(ptr + 4, fptag);
4131 stw(ptr + 6, 0);
4132 stw(ptr + 8, 0);
4133 stw(ptr + 10, 0);
4134 stw(ptr + 12, 0);
4135 }
4136 }
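/* Note: the environment image uses one 2-bit tag per register
   (0 = valid, 1 = zero, 2 = special NaN/infinity/denormal, 3 = empty),
   packed from ST(7) downwards so that register i occupies bits
   2i+1:2i, matching the decode loop in helper_fldenv() below.
   Illustrative extraction of one tag: */
#if 0
static int fptag_sketch(uint16_t fptag, int i)      /* i = 0..7 */
{
    return (fptag >> (2 * i)) & 3;
}
#endif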
4137
4138 void helper_fldenv(target_ulong ptr, int data32)
4139 {
4140 int i, fpus, fptag;
4141
4142 if (data32) {
4143 env->fpuc = lduw(ptr);
4144 fpus = lduw(ptr + 4);
4145 fptag = lduw(ptr + 8);
4146 }
4147 else {
4148 env->fpuc = lduw(ptr);
4149 fpus = lduw(ptr + 2);
4150 fptag = lduw(ptr + 4);
4151 }
4152 env->fpstt = (fpus >> 11) & 7;
4153 env->fpus = fpus & ~0x3800;
4154 for(i = 0;i < 8; i++) {
4155 env->fptags[i] = ((fptag & 3) == 3);
4156 fptag >>= 2;
4157 }
4158 }
4159
4160 void helper_fsave(target_ulong ptr, int data32)
4161 {
4162 CPU86_LDouble tmp;
4163 int i;
4164
4165 helper_fstenv(ptr, data32);
4166
4167 ptr += (14 << data32);
4168 for(i = 0;i < 8; i++) {
4169 tmp = ST(i);
4170 helper_fstt(tmp, ptr);
4171 ptr += 10;
4172 }
4173
4174 /* fninit */
4175 env->fpus = 0;
4176 env->fpstt = 0;
4177 env->fpuc = 0x37f;
4178 env->fptags[0] = 1;
4179 env->fptags[1] = 1;
4180 env->fptags[2] = 1;
4181 env->fptags[3] = 1;
4182 env->fptags[4] = 1;
4183 env->fptags[5] = 1;
4184 env->fptags[6] = 1;
4185 env->fptags[7] = 1;
4186 }
4187
4188 void helper_frstor(target_ulong ptr, int data32)
4189 {
4190 CPU86_LDouble tmp;
4191 int i;
4192
4193 helper_fldenv(ptr, data32);
4194 ptr += (14 << data32);
4195
4196 for(i = 0;i < 8; i++) {
4197 tmp = helper_fldt(ptr);
4198 ST(i) = tmp;
4199 ptr += 10;
4200 }
4201 }
4202
4203 void helper_fxsave(target_ulong ptr, int data64)
4204 {
4205 int fpus, fptag, i, nb_xmm_regs;
4206 CPU86_LDouble tmp;
4207 target_ulong addr;
4208
4209 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4210 fptag = 0;
4211 for(i = 0; i < 8; i++) {
4212 fptag |= (env->fptags[i] << i);
4213 }
4214 stw(ptr, env->fpuc);
4215 stw(ptr + 2, fpus);
4216 stw(ptr + 4, fptag ^ 0xff);
4217
4218 addr = ptr + 0x20;
4219 for(i = 0;i < 8; i++) {
4220 tmp = ST(i);
4221 helper_fstt(tmp, addr);
4222 addr += 16;
4223 }
4224
4225 if (env->cr[4] & CR4_OSFXSR_MASK) {
4226 /* XXX: finish it */
4227 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4228 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4229 nb_xmm_regs = 8 << data64;
4230 addr = ptr + 0xa0;
4231 for(i = 0; i < nb_xmm_regs; i++) {
4232 stq(addr, env->xmm_regs[i].XMM_Q(0));
4233 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4234 addr += 16;
4235 }
4236 }
4237 }
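
/* The fxsave image uses the abridged one-bit-per-register tag
   (1 = in use); fptags[] stores 1 for empty, hence "fptag ^ 0xff"
   above.  Layout as written here: FCW at +0x00, FSW at +0x02, tag at
   +0x04, MXCSR at +0x18, its mask at +0x1c, ST0-ST7 in 16-byte slots
   from +0x20, and the XMM registers from +0xa0. */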
4238
4239 void helper_fxrstor(target_ulong ptr, int data64)
4240 {
4241 int i, fpus, fptag, nb_xmm_regs;
4242 CPU86_LDouble tmp;
4243 target_ulong addr;
4244
4245 env->fpuc = lduw(ptr);
4246 fpus = lduw(ptr + 2);
4247 fptag = lduw(ptr + 4);
4248 env->fpstt = (fpus >> 11) & 7;
4249 env->fpus = fpus & ~0x3800;
4250 fptag ^= 0xff;
4251 for(i = 0;i < 8; i++) {
4252 env->fptags[i] = ((fptag >> i) & 1);
4253 }
4254
4255 addr = ptr + 0x20;
4256 for(i = 0;i < 8; i++) {
4257 tmp = helper_fldt(addr);
4258 ST(i) = tmp;
4259 addr += 16;
4260 }
4261
4262 if (env->cr[4] & CR4_OSFXSR_MASK) {
4263 /* XXX: finish it */
4264 env->mxcsr = ldl(ptr + 0x18);
4265 //ldl(ptr + 0x1c);
4266 nb_xmm_regs = 8 << data64;
4267 addr = ptr + 0xa0;
4268 for(i = 0; i < nb_xmm_regs; i++) {
4269 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4270 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4271 addr += 16;
4272 }
4273 }
4274 }
4275
4276 #ifndef USE_X86LDOUBLE
4277
4278 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4279 {
4280 CPU86_LDoubleU temp;
4281 int e;
4282
4283 temp.d = f;
4284 /* mantissa */
4285 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4286 /* exponent + sign */
4287 e = EXPD(temp) - EXPBIAS + 16383;
4288 e |= SIGND(temp) >> 16;
4289 *pexp = e;
4290 }
4291
4292 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4293 {
4294 CPU86_LDoubleU temp;
4295 int e;
4296 uint64_t ll;
4297
4298 /* XXX: handle overflow ? */
4299 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4300 e |= (upper >> 4) & 0x800; /* sign */
4301 ll = (mant >> 11) & ((1LL << 52) - 1);
4302 #ifdef __arm__
4303 temp.l.upper = (e << 20) | (ll >> 32);
4304 temp.l.lower = ll;
4305 #else
4306 temp.ll = ll | ((uint64_t)e << 52);
4307 #endif
4308 return temp.d;
4309 }
4310
4311 #else
4312
4313 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4314 {
4315 CPU86_LDoubleU temp;
4316
4317 temp.d = f;
4318 *pmant = temp.l.lower;
4319 *pexp = temp.l.upper;
4320 }
4321
4322 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4323 {
4324 CPU86_LDoubleU temp;
4325
4326 temp.l.upper = upper;
4327 temp.l.lower = mant;
4328 return temp.d;
4329 }
4330 #endif
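
/* Without a host 80-bit long double, cpu_get_fp80/cpu_set_fp80 rebias
   between the guest's extended format (15-bit exponent, bias 16383,
   explicit integer bit 63) and the host double (11-bit exponent, bias
   1023, implicit integer bit).  A standalone sketch of the same
   conversion for a normal operand (illustrative only; no NaN,
   overflow or underflow handling): */
#if 0
#include <stdint.h>
#include <string.h>

static double fp80_to_double_sketch(uint64_t mant, uint16_t se)
{
    uint64_t e = ((uint64_t)(se & 0x7fff) - 16383 + 1023) & 0x7ff;
    uint64_t bits = ((uint64_t)(se >> 15) << 63)            /* sign */
                  | (e << 52)                               /* exponent */
                  | ((mant >> 11) & ((1ULL << 52) - 1));    /* drop integer bit */
    double d;

    memcpy(&d, &bits, 8);
    return d;
}
#endif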
4331
4332 #ifdef TARGET_X86_64
4333
4334 //#define DEBUG_MULDIV
4335
4336 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4337 {
4338 *plow += a;
4339 /* carry test */
4340 if (*plow < a)
4341 (*phigh)++;
4342 *phigh += b;
4343 }
4344
4345 static void neg128(uint64_t *plow, uint64_t *phigh)
4346 {
4347 *plow = ~ *plow;
4348 *phigh = ~ *phigh;
4349 add128(plow, phigh, 1, 0);
4350 }
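
/* add128 detects the carry out of the low word with the usual
   unsigned wrap test (*plow < a after the addition); neg128 is
   128-bit two's-complement negation: complement both halves, then
   add 1 so the carry propagates. */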
4351
4352 /* return TRUE if overflow */
4353 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4354 {
4355 uint64_t q, r, a1, a0;
4356 int i, qb, ab;
4357
4358 a0 = *plow;
4359 a1 = *phigh;
4360 if (a1 == 0) {
4361 q = a0 / b;
4362 r = a0 % b;
4363 *plow = q;
4364 *phigh = r;
4365 } else {
4366 if (a1 >= b)
4367 return 1;
4368 /* XXX: use a better algorithm */
4369 for(i = 0; i < 64; i++) {
4370 ab = a1 >> 63;
4371 a1 = (a1 << 1) | (a0 >> 63);
4372 if (ab || a1 >= b) {
4373 a1 -= b;
4374 qb = 1;
4375 } else {
4376 qb = 0;
4377 }
4378 a0 = (a0 << 1) | qb;
4379 }
4380 #if defined(DEBUG_MULDIV)
4381 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4382 *phigh, *plow, b, a0, a1);
4383 #endif
4384 *plow = a0;
4385 *phigh = a1;
4386 }
4387 return 0;
4388 }
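
/* div64 is schoolbook restoring division: each of the 64 iterations
   shifts the 128-bit dividend left one bit and subtracts the divisor
   from the high half when it fits, accumulating quotient bits in the
   vacated low positions.  A hypothetical self-test (illustrative
   only): */
#if 0
#include <assert.h>

static void div64_selftest(void)
{
    uint64_t lo = 123456789, hi = 0;     /* 128-bit dividend */

    assert(div64(&lo, &hi, 1000) == 0);  /* no overflow */
    assert(lo == 123456 && hi == 789);   /* quotient, remainder */
}
#endif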
4389
4390 /* return TRUE if overflow */
4391 static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4392 {
4393 int sa, sb;
4394 sa = ((int64_t)*phigh < 0);
4395 if (sa)
4396 neg128(plow, phigh);
4397 sb = (b < 0);
4398 if (sb)
4399 b = -b;
4400 if (div64(plow, phigh, b) != 0)
4401 return 1;
4402 if (sa ^ sb) {
4403 if (*plow > (1ULL << 63))
4404 return 1;
4405 *plow = - *plow;
4406 } else {
4407 if (*plow >= (1ULL << 63))
4408 return 1;
4409 }
4410 if (sa)
4411 *phigh = - *phigh;
4412 return 0;
4413 }
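
/* idiv64 reduces signed division to the unsigned case: negate
   negative operands, divide, then fix the signs up.  The asymmetric
   overflow checks reflect two's complement: a negative quotient may
   reach -2^63 (hence "> (1ULL << 63)") while a positive one tops out
   at 2^63 - 1 (hence ">="). */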
4414
4415 void helper_mulq_EAX_T0(void)
4416 {
4417 uint64_t r0, r1;
4418
4419 mulu64(&r0, &r1, EAX, T0);
4420 EAX = r0;
4421 EDX = r1;
4422 CC_DST = r0;
4423 CC_SRC = r1;
4424 }
4425
4426 void helper_imulq_EAX_T0(void)
4427 {
4428 uint64_t r0, r1;
4429
4430 muls64(&r0, &r1, EAX, T0);
4431 EAX = r0;
4432 EDX = r1;
4433 CC_DST = r0;
4434 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4435 }
4436
4437 void helper_imulq_T0_T1(void)
4438 {
4439 uint64_t r0, r1;
4440
4441 muls64(&r0, &r1, T0, T1);
4442 T0 = r0;
4443 CC_DST = r0;
4444 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4445 }
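
/* For the signed multiplies, CF/OF are derived from whether the high
   half is merely the sign extension of the low half: the product fits
   in 64 bits iff r1 == (int64_t)r0 >> 63, so CC_SRC is nonzero
   exactly when the result overflowed. */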
4446
4447 void helper_divq_EAX(target_ulong t0)
4448 {
4449 uint64_t r0, r1;
4450 if (t0 == 0) {
4451 raise_exception(EXCP00_DIVZ);
4452 }
4453 r0 = EAX;
4454 r1 = EDX;
4455 if (div64(&r0, &r1, t0))
4456 raise_exception(EXCP00_DIVZ);
4457 EAX = r0;
4458 EDX = r1;
4459 }
4460
4461 void helper_idivq_EAX(target_ulong t0)
4462 {
4463 uint64_t r0, r1;
4464 if (t0 == 0) {
4465 raise_exception(EXCP00_DIVZ);
4466 }
4467 r0 = EAX;
4468 r1 = EDX;
4469 if (idiv64(&r0, &r1, t0))
4470 raise_exception(EXCP00_DIVZ);
4471 EAX = r0;
4472 EDX = r1;
4473 }
4474 #endif
4475
4476 void helper_hlt(void)
4477 {
4478 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4479 env->hflags |= HF_HALTED_MASK;
4480 env->exception_index = EXCP_HLT;
4481 cpu_loop_exit();
4482 }
4483
4484 void helper_monitor(target_ulong ptr)
4485 {
4486 if ((uint32_t)ECX != 0)
4487 raise_exception(EXCP0D_GPF);
4488 /* XXX: store address ? */
4489 }
4490
4491 void helper_mwait(void)
4492 {
4493 if ((uint32_t)ECX != 0)
4494 raise_exception(EXCP0D_GPF);
4495 /* XXX: not complete but not completely erroneous */
4496 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4497 /* more than one CPU: do not sleep because another CPU may
4498 wake this one */
4499 } else {
4500 helper_hlt();
4501 }
4502 }
4503
4504 void helper_debug(void)
4505 {
4506 env->exception_index = EXCP_DEBUG;
4507 cpu_loop_exit();
4508 }
4509
4510 void helper_raise_interrupt(int intno, int next_eip_addend)
4511 {
4512 raise_interrupt(intno, 1, 0, next_eip_addend);
4513 }
4514
4515 void helper_raise_exception(int exception_index)
4516 {
4517 raise_exception(exception_index);
4518 }
4519
4520 void helper_cli(void)
4521 {
4522 env->eflags &= ~IF_MASK;
4523 }
4524
4525 void helper_sti(void)
4526 {
4527 env->eflags |= IF_MASK;
4528 }
4529
4530 #if 0
4531 /* vm86plus instructions */
4532 void helper_cli_vm(void)
4533 {
4534 env->eflags &= ~VIF_MASK;
4535 }
4536
4537 void helper_sti_vm(void)
4538 {
4539 env->eflags |= VIF_MASK;
4540 if (env->eflags & VIP_MASK) {
4541 raise_exception(EXCP0D_GPF);
4542 }
4543 }
4544 #endif
4545
4546 void helper_set_inhibit_irq(void)
4547 {
4548 env->hflags |= HF_INHIBIT_IRQ_MASK;
4549 }
4550
4551 void helper_reset_inhibit_irq(void)
4552 {
4553 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4554 }
4555
4556 void helper_boundw(void)
4557 {
4558 int low, high, v;
4559 low = ldsw(A0);
4560 high = ldsw(A0 + 2);
4561 v = (int16_t)T0;
4562 if (v < low || v > high) {
4563 raise_exception(EXCP05_BOUND);
4564 }
4565 FORCE_RET();
4566 }
4567
4568 void helper_boundl(void)
4569 {
4570 int low, high, v;
4571 low = ldl(A0);
4572 high = ldl(A0 + 4);
4573 v = T0;
4574 if (v < low || v > high) {
4575 raise_exception(EXCP05_BOUND);
4576 }
4577 FORCE_RET();
4578 }
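
/* BOUND raises #BR (EXCP05_BOUND) when the signed index in T0 falls
   outside the inclusive [low, high] pair stored at A0; e.g. with
   bounds {0, 9}, any negative index or any index above 9 faults. */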
4579
4580 static float approx_rsqrt(float a)
4581 {
4582 return 1.0 / sqrt(a);
4583 }
4584
4585 static float approx_rcp(float a)
4586 {
4587 return 1.0 / a;
4588 }
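
/* These back the SSE reciprocal instructions (rcpps/rsqrtps and
   friends), which the architecture only specifies to roughly 12 bits
   of relative precision; returning the full-precision result is
   therefore a valid, if over-accurate, implementation. */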
4589
4590 #if !defined(CONFIG_USER_ONLY)
4591
4592 #define MMUSUFFIX _mmu
4593 #ifdef __s390__
4594 # define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
4595 #else
4596 # define GETPC() (__builtin_return_address(0))
4597 #endif
4598
4599 #define SHIFT 0
4600 #include "softmmu_template.h"
4601
4602 #define SHIFT 1
4603 #include "softmmu_template.h"
4604
4605 #define SHIFT 2
4606 #include "softmmu_template.h"
4607
4608 #define SHIFT 3
4609 #include "softmmu_template.h"
4610
4611 #endif
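
/* softmmu_template.h is included once per access width; SHIFT selects
   the operand size (1 << SHIFT bytes), generating the b/w/l/q
   variants of the slow-path load/store helpers, which call tlb_fill
   below on a TLB miss. */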
4612
4613 /* try to fill the TLB and raise an exception on error. If retaddr is
4614 NULL, the function was called from C code (i.e. not from generated
4615 code or from helper.c) */
4616 /* XXX: fix it to restore all registers */
4617 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4618 {
4619 TranslationBlock *tb;
4620 int ret;
4621 unsigned long pc;
4622 CPUX86State *saved_env;
4623
4624 /* XXX: hack to restore env in all cases, even if not called from
4625 generated code */
4626 saved_env = env;
4627 env = cpu_single_env;
4628
4629 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4630 if (ret) {
4631 if (retaddr) {
4632 /* now we have a real cpu fault */
4633 pc = (unsigned long)retaddr;
4634 tb = tb_find_pc(pc);
4635 if (tb) {
4636 /* the PC is inside the translated code. It means that we have
4637 a virtual CPU fault */
4638 cpu_restore_state(tb, env, pc, NULL);
4639 }
4640 }
4641 if (retaddr)
4642 raise_exception_err(env->exception_index, env->error_code);
4643 else
4644 raise_exception_err_norestore(env->exception_index, env->error_code);
4645 }
4646 env = saved_env;
4647 }
4648
4649
4650 /* Secure Virtual Machine helpers */
4651
4652 void helper_stgi(void)
4653 {
4654 env->hflags |= HF_GIF_MASK;
4655 }
4656
4657 void helper_clgi(void)
4658 {
4659 env->hflags &= ~HF_GIF_MASK;
4660 }
4661
4662 #if defined(CONFIG_USER_ONLY)
4663
4664 void helper_vmrun(void) { }
4665 void helper_vmmcall(void) { }
4666 void helper_vmload(void) { }
4667 void helper_vmsave(void) { }
4668 void helper_skinit(void) { }
4669 void helper_invlpga(void) { }
4670 void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
4671 int svm_check_intercept_param(uint32_t type, uint64_t param)
4672 {
4673 return 0;
4674 }
4675
4676 #else
4677
4678 static inline uint32_t
4679 vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
4680 {
4681 return ((vmcb_attrib & 0x00ff) << 8) /* Type, S, DPL, P */
4682 | ((vmcb_attrib & 0x0f00) << 12) /* AVL, L, DB, G */
4683 | ((vmcb_base >> 16) & 0xff) /* Base 23-16 */
4684 | (vmcb_base & 0xff000000) /* Base 31-24 */
4685 | (vmcb_limit & 0xf0000); /* Limit 19-16 */
4686 }
4687
4688 static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
4689 {
4690 return ((cpu_attrib >> 8) & 0xff) /* Type, S, DPL, P */
4691 | ((cpu_attrib & 0xf00000) >> 12); /* AVL, L, DB, G */
4692 }
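
/* The VMCB keeps segment attributes in a compressed 12-bit form
   (descriptor byte 5 plus the flags nibble of byte 6).
   vmcb2cpu_attrib expands that back into the high dword of a legacy
   descriptor, re-merging the base 23-16/31-24 and limit 19-16 fields
   from the VMCB's separate base/limit words; cpu2vmcb_attrib is the
   inverse for the attribute bits alone. */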
4693
4694 void helper_vmrun(void)
4695 {
4696 target_ulong addr;
4697 uint32_t event_inj;
4698 uint32_t int_ctl;
4699
4700 addr = EAX;
4701 if (loglevel & CPU_LOG_TB_IN_ASM)
4702 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
4703
4704 env->vm_vmcb = addr;
4705 regs_to_env();
4706
4707 /* save the current CPU state in the hsave page */
4708 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4709 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4710
4711 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4712 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4713
4714 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4715 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4716 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4717 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4718 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
4719 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4720 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4721
4722 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4723 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4724
4725 SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
4726 SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
4727 SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
4728 SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);
4729
4730 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
4731 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4732 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4733
4734 /* load the interception bitmaps so we do not need to access the
4735 vmcb in svm mode */
4736 /* We shift all the intercept bits so we can OR them with the TB
4737 flags later on */
4738 env->intercept = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
4739 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4740 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4741 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4742 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4743 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4744
4745 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4746 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4747
4748 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4749 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4750
4751 /* clear exit_info_2 so we behave like the real hardware */
4752 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4753
4754 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4755 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4756 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4757 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4758 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4759 if (int_ctl & V_INTR_MASKING_MASK) {
4760 env->cr[8] = int_ctl & V_TPR_MASK;
4761 cpu_set_apic_tpr(env, env->cr[8]);
4762 if (env->eflags & IF_MASK)
4763 env->hflags |= HF_HIF_MASK;
4764 }
4765
4766 #ifdef TARGET_X86_64
4767 env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
4768 env->hflags &= ~HF_LMA_MASK;
4769 if (env->efer & MSR_EFER_LMA)
4770 env->hflags |= HF_LMA_MASK;
4771 #endif
4772 env->eflags = 0;
4773 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4774 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4775 CC_OP = CC_OP_EFLAGS;
4776 CC_DST = 0xffffffff;
4777
4778 SVM_LOAD_SEG(env->vm_vmcb, ES, es);
4779 SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
4780 SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
4781 SVM_LOAD_SEG(env->vm_vmcb, DS, ds);
4782
4783 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4784 env->eip = EIP;
4785 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4786 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4787 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4788 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4789 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4790
4791 /* FIXME: guest state consistency checks */
4792
4793 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4794 case TLB_CONTROL_DO_NOTHING:
4795 break;
4796 case TLB_CONTROL_FLUSH_ALL_ASID:
4797 /* FIXME: this is not 100% correct but should work for now */
4798 tlb_flush(env, 1);
4799 break;
4800 }
4801
4802 helper_stgi();
4803
4804 regs_to_env();
4805
4806 /* maybe we need to inject an event */
4807 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
4808 if (event_inj & SVM_EVTINJ_VALID) {
4809 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4810 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4811 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
4812 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
4813
4814 if (loglevel & CPU_LOG_TB_IN_ASM)
4815 fprintf(logfile, "Injecting(%#hx): ", valid_err);
4816 /* FIXME: need to implement valid_err */
4817 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
4818 case SVM_EVTINJ_TYPE_INTR:
4819 env->exception_index = vector;
4820 env->error_code = event_inj_err;
4821 env->exception_is_int = 0;
4822 env->exception_next_eip = -1;
4823 if (loglevel & CPU_LOG_TB_IN_ASM)
4824 fprintf(logfile, "INTR");
4825 break;
4826 case SVM_EVTINJ_TYPE_NMI:
4827 env->exception_index = vector;
4828 env->error_code = event_inj_err;
4829 env->exception_is_int = 0;
4830 env->exception_next_eip = EIP;
4831 if (loglevel & CPU_LOG_TB_IN_ASM)
4832 fprintf(logfile, "NMI");
4833 break;
4834 case SVM_EVTINJ_TYPE_EXEPT:
4835 env->exception_index = vector;
4836 env->error_code = event_inj_err;
4837 env->exception_is_int = 0;
4838 env->exception_next_eip = -1;
4839 if (loglevel & CPU_LOG_TB_IN_ASM)
4840 fprintf(logfile, "EXEPT");
4841 break;
4842 case SVM_EVTINJ_TYPE_SOFT:
4843 env->exception_index = vector;
4844 env->error_code = event_inj_err;
4845 env->exception_is_int = 1;
4846 env->exception_next_eip = EIP;
4847 if (loglevel & CPU_LOG_TB_IN_ASM)
4848 fprintf(logfile, "SOFT");
4849 break;
4850 }
4851 if (loglevel & CPU_LOG_TB_IN_ASM)
4852 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
4853 }
4854 if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
4855 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4856 }
4857
4858 cpu_loop_exit();
4859 }
4860
4861 void helper_vmmcall(void)
4862 {
4863 if (loglevel & CPU_LOG_TB_IN_ASM)
4864 fprintf(logfile,"vmmcall!\n");
4865 }
4866
4867 void helper_vmload(void)
4868 {
4869 target_ulong addr;
4870 addr = EAX;
4871 if (loglevel & CPU_LOG_TB_IN_ASM)
4872 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4873 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4874 env->segs[R_FS].base);
4875
4876 SVM_LOAD_SEG2(addr, segs[R_FS], fs);
4877 SVM_LOAD_SEG2(addr, segs[R_GS], gs);
4878 SVM_LOAD_SEG2(addr, tr, tr);
4879 SVM_LOAD_SEG2(addr, ldt, ldtr);
4880
4881 #ifdef TARGET_X86_64
4882 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
4883 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
4884 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
4885 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
4886 #endif
4887 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
4888 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
4889 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
4890 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
4891 }
4892
4893 void helper_vmsave(void)
4894 {
4895 target_ulong addr;
4896 addr = EAX;
4897 if (loglevel & CPU_LOG_TB_IN_ASM)
4898 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4899 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4900 env->segs[R_FS].base);
4901
4902 SVM_SAVE_SEG(addr, segs[R_FS], fs);
4903 SVM_SAVE_SEG(addr, segs[R_GS], gs);
4904 SVM_SAVE_SEG(addr, tr, tr);
4905 SVM_SAVE_SEG(addr, ldt, ldtr);
4906
4907 #ifdef TARGET_X86_64
4908 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
4909 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
4910 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
4911 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
4912 #endif
4913 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
4914 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
4915 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
4916 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
4917 }
4918
4919 void helper_skinit(void)
4920 {
4921 if (loglevel & CPU_LOG_TB_IN_ASM)
4922 fprintf(logfile,"skinit!\n");
4923 }
4924
4925 void helper_invlpga(void)
4926 {
4927 tlb_flush(env, 0);
4928 }
4929
4930 int svm_check_intercept_param(uint32_t type, uint64_t param)
4931 {
4932 switch(type) {
4933 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
4934 if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
4935 vmexit(type, param);
4936 return 1;
4937 }
4938 break;
4939 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
4940 if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
4941 vmexit(type, param);
4942 return 1;
4943 }
4944 break;
4945 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
4946 if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
4947 vmexit(type, param);
4948 return 1;
4949 }
4950 break;
4951 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
4952 if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
4953 vmexit(type, param);
4954 return 1;
4955 }
4956 break;
4957 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
4958 if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
4959 vmexit(type, param);
4960 return 1;
4961 }
4962 break;
4963 case SVM_EXIT_IOIO:
4964 if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
4965 /* FIXME: this should be read in at vmrun (faster this way?) */
4966 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
4967 uint16_t port = (uint16_t) (param >> 16);
4968
4969 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
4970 if(lduw_phys(addr + port / 8) & (mask << (port & 7)))
4971 vmexit(type, param);
4972 }
4973 break;
4974
4975 case SVM_EXIT_MSR:
4976 if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
4977 /* FIXME: this should be read in at vmrun (faster this way?) */
4978 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
4979 switch((uint32_t)ECX) {
4980 case 0 ... 0x1fff:
4981 T0 = (ECX * 2) % 8;
4982 T1 = (ECX * 2) / 8;
4983 break;
4984 case 0xc0000000 ... 0xc0001fff:
4985 T0 = (8192 + ECX - 0xc0000000) * 2;
4986 T1 = (T0 / 8);
4987 T0 %= 8;
4988 break;
4989 case 0xc0010000 ... 0xc0011fff:
4990 T0 = (16384 + ECX - 0xc0010000) * 2;
4991 T1 = (T0 / 8);
4992 T0 %= 8;
4993 break;
4994 default:
4995 vmexit(type, param);
4996 return 1;
4997 }
4998 if (ldub_phys(addr + T1) & ((1 << param) << T0))
4999 vmexit(type, param);
5001 }
5002 break;
5003 default:
5004 if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
5005 vmexit(type, param);
5006 return 1;
5007 }
5008 break;
5009 }
5010 return 0;
5011 }
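
/* The I/O permission map holds one bit per port (multi-byte accesses
   test a mask of adjacent bits, built from the size field in
   param >> 4).  The MSR permission map holds two bits per MSR (read,
   then write) and covers three architectural ranges -- 0..0x1fff,
   0xc0000000..0xc0001fff and 0xc0010000..0xc0011fff -- mapped to
   consecutive 2KB regions of the bitmap; T1 is the byte offset and
   T0 the bit offset within that byte. */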
5012
5013 void vmexit(uint64_t exit_code, uint64_t exit_info_1)
5014 {
5015 uint32_t int_ctl;
5016
5017 if (loglevel & CPU_LOG_TB_IN_ASM)
5018 fprintf(logfile,"vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5019 exit_code, exit_info_1,
5020 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5021 EIP);
5022
5023 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5024 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5025 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5026 } else {
5027 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5028 }
5029
5030 /* Save the VM state in the vmcb */
5031 SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
5032 SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
5033 SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
5034 SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);
5035
5036 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5037 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5038
5039 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5040 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5041
5042 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5043 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5044 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5045 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5046 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5047
5048 if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
5049 int_ctl &= ~V_TPR_MASK;
5050 int_ctl |= env->cr[8] & V_TPR_MASK;
5051 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5052 }
5053
5054 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5055 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5056 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5057 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5058 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5059 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5060 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5061
5062 /* Reload the host state from vm_hsave */
5063 env->hflags &= ~HF_HIF_MASK;
5064 env->intercept = 0;
5065 env->intercept_exceptions = 0;
5066 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5067
5068 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5069 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5070
5071 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5072 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5073
5074 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5075 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5076 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5077 if (int_ctl & V_INTR_MASKING_MASK) {
5078 env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
5079 cpu_set_apic_tpr(env, env->cr[8]);
5080 }
5081 /* we need to set the efer after the crs so the hidden flags get set properly */
5082 #ifdef TARGET_X86_64
5083 env->efer = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
5084 env->hflags &= ~HF_LMA_MASK;
5085 if (env->efer & MSR_EFER_LMA)
5086 env->hflags |= HF_LMA_MASK;
5087 #endif
5088
5089 env->eflags = 0;
5090 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5091 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5092 CC_OP = CC_OP_EFLAGS;
5093
5094 SVM_LOAD_SEG(env->vm_hsave, ES, es);
5095 SVM_LOAD_SEG(env->vm_hsave, CS, cs);
5096 SVM_LOAD_SEG(env->vm_hsave, SS, ss);
5097 SVM_LOAD_SEG(env->vm_hsave, DS, ds);
5098
5099 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5100 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5101 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5102
5103 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5104 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5105
5106 /* other setups */
5107 cpu_x86_set_cpl(env, 0);
5108 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
5109 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5110 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5111
5112 helper_clgi();
5113 /* FIXME: Resets the current ASID register to zero (host ASID). */
5114
5115 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5116
5117 /* Clears the TSC_OFFSET inside the processor. */
5118
5119 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5120 from the page table indicated by the host's CR3. If the PDPEs contain
5121 illegal state, the processor causes a shutdown. */
5122
5123 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5124 env->cr[0] |= CR0_PE_MASK;
5125 env->eflags &= ~VM_MASK;
5126
5127 /* Disables all breakpoints in the host DR7 register. */
5128
5129 /* Checks the reloaded host state for consistency. */
5130
5131 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5132 host's code segment or non-canonical (in the case of long mode), a
5133 #GP fault is delivered inside the host. */
5134
5135 /* remove any pending exception */
5136 env->exception_index = -1;
5137 env->error_code = 0;
5138 env->old_exception = -1;
5139
5140 regs_to_env();
5141 cpu_loop_exit();
5142 }
5143
5144 #endif
5145
5146 /* MMX/SSE */
5147 /* XXX: optimize by storing fptt and fptags in the static cpu state */
5148 void helper_enter_mmx(void)
5149 {
5150 env->fpstt = 0;
5151 *(uint32_t *)(env->fptags) = 0;
5152 *(uint32_t *)(env->fptags + 4) = 0;
5153 }
5154
5155 void helper_emms(void)
5156 {
5157 /* set to empty state */
5158 *(uint32_t *)(env->fptags) = 0x01010101;
5159 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5160 }
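
/* fptags[] is eight one-byte flags, so each 32-bit store above
   touches four of them at once: 0x01010101 marks four registers empty
   (emms), 0 marks them in use (enter_mmx). */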
5161
5162 /* XXX: suppress */
5163 void helper_movq(uint64_t *d, uint64_t *s)
5164 {
5165 *d = *s;
5166 }
5167
5168 #define SHIFT 0
5169 #include "ops_sse.h"
5170
5171 #define SHIFT 1
5172 #include "ops_sse.h"
5173