git.proxmox.com Git - qemu.git/blob - target-i386/op_helper.c
target-i386: remove old code handling float64
1 /*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include <math.h>
21 #include "exec.h"
22 #include "exec-all.h"
23 #include "host-utils.h"
24 #include "ioport.h"
25
26 //#define DEBUG_PCALL
27
28
29 #ifdef DEBUG_PCALL
30 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
31 # define LOG_PCALL_STATE(env) \
32 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
33 #else
34 # define LOG_PCALL(...) do { } while (0)
35 # define LOG_PCALL_STATE(env) do { } while (0)
36 #endif
37
38
39 #if 0
40 #define raise_exception_err(a, b)\
41 do {\
42 qemu_log("raise_exception line=%d\n", __LINE__);\
43 (raise_exception_err)(a, b);\
44 } while (0)
45 #endif
46
47 static const uint8_t parity_table[256] = {
48 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
49 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
51 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
52 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
57 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
60 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 };
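/* parity_table[i] is CC_P exactly when byte value i has an even number of
 * set bits, which is how the x86 PF flag is defined (parity of the low 8
 * bits of the result).  An equivalent table could be generated with:
 *
 *     for (i = 0; i < 256; i++) {
 *         int p = i ^ (i >> 4);
 *         p ^= p >> 2;
 *         p ^= p >> 1;
 *         parity_table[i] = (p & 1) ? 0 : CC_P;
 *     }
 */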
81
82 /* modulo 17 table */
83 static const uint8_t rclw_table[32] = {
84 0, 1, 2, 3, 4, 5, 6, 7,
85 8, 9,10,11,12,13,14,15,
86 16, 0, 1, 2, 3, 4, 5, 6,
87 7, 8, 9,10,11,12,13,14,
88 };
89
90 /* modulo 9 table */
91 static const uint8_t rclb_table[32] = {
92 0, 1, 2, 3, 4, 5, 6, 7,
93 8, 0, 1, 2, 3, 4, 5, 6,
94 7, 8, 0, 1, 2, 3, 4, 5,
95 6, 7, 8, 0, 1, 2, 3, 4,
96 };
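/* RCL and RCR rotate through CF, so a 16-bit rotate effectively cycles over
 * 17 bit positions and an 8-bit rotate over 9.  These tables reduce the
 * 5-bit shift count accordingly, e.g. rclw_table[18] == 1 (18 mod 17) and
 * rclb_table[10] == 1 (10 mod 9).
 */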
97
98 #define floatx80_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL )
99 #define floatx80_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL )
100 #define floatx80_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL )
101
102 static const floatx80 f15rk[7] =
103 {
104 floatx80_zero,
105 floatx80_one,
106 floatx80_pi,
107 floatx80_lg2,
108 floatx80_ln2,
109 floatx80_l2e,
110 floatx80_l2t,
111 };
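/* f15rk[] holds, in floatx80 form, the seven constants that the x87
 * load-constant instructions provide: +0.0 (FLDZ), 1.0 (FLD1), pi (FLDPI),
 * log10(2) (FLDLG2), ln(2) (FLDLN2), log2(e) (FLDL2E) and log2(10) (FLDL2T).
 */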
112
113 /* broken thread support */
114
115 static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
116
117 void helper_lock(void)
118 {
119 spin_lock(&global_cpu_lock);
120 }
121
122 void helper_unlock(void)
123 {
124 spin_unlock(&global_cpu_lock);
125 }
126
127 void helper_write_eflags(target_ulong t0, uint32_t update_mask)
128 {
129 load_eflags(t0, update_mask);
130 }
131
132 target_ulong helper_read_eflags(void)
133 {
134 uint32_t eflags;
135 eflags = helper_cc_compute_all(CC_OP);
136 eflags |= (DF & DF_MASK);
137 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
138 return eflags;
139 }
140
141 /* return non-zero on error */
142 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
143 int selector)
144 {
145 SegmentCache *dt;
146 int index;
147 target_ulong ptr;
148
149 if (selector & 0x4)
150 dt = &env->ldt;
151 else
152 dt = &env->gdt;
153 index = selector & ~7;
154 if ((index + 7) > dt->limit)
155 return -1;
156 ptr = dt->base + index;
157 *e1_ptr = ldl_kernel(ptr);
158 *e2_ptr = ldl_kernel(ptr + 4);
159 return 0;
160 }
161
162 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
163 {
164 unsigned int limit;
165 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
166 if (e2 & DESC_G_MASK)
167 limit = (limit << 12) | 0xfff;
168 return limit;
169 }
170
171 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
172 {
173 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
174 }
175
176 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
177 {
178 sc->base = get_seg_base(e1, e2);
179 sc->limit = get_seg_limit(e1, e2);
180 sc->flags = e2;
181 }
182
183 /* init the segment cache in vm86 mode. */
184 static inline void load_seg_vm(int seg, int selector)
185 {
186 selector &= 0xffff;
187 cpu_x86_load_seg_cache(env, seg, selector,
188 (selector << 4), 0xffff, 0);
189 }
190
191 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
192 uint32_t *esp_ptr, int dpl)
193 {
194 int type, index, shift;
195
196 #if 0
197 {
198 int i;
199 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
200 for(i=0;i<env->tr.limit;i++) {
201 printf("%02x ", env->tr.base[i]);
202 if ((i & 7) == 7) printf("\n");
203 }
204 printf("\n");
205 }
206 #endif
207
208 if (!(env->tr.flags & DESC_P_MASK))
209 cpu_abort(env, "invalid tss");
210 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
211 if ((type & 7) != 1)
212 cpu_abort(env, "invalid tss type");
213 shift = type >> 3;
214 index = (dpl * 4 + 2) << shift;
215 if (index + (4 << shift) - 1 > env->tr.limit)
216 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
217 if (shift == 0) {
218 *esp_ptr = lduw_kernel(env->tr.base + index);
219 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
220 } else {
221 *esp_ptr = ldl_kernel(env->tr.base + index);
222 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
223 }
224 }
225
226 /* XXX: merge with load_seg() */
227 static void tss_load_seg(int seg_reg, int selector)
228 {
229 uint32_t e1, e2;
230 int rpl, dpl, cpl;
231
232 if ((selector & 0xfffc) != 0) {
233 if (load_segment(&e1, &e2, selector) != 0)
234 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
235 if (!(e2 & DESC_S_MASK))
236 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
237 rpl = selector & 3;
238 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
239 cpl = env->hflags & HF_CPL_MASK;
240 if (seg_reg == R_CS) {
241 if (!(e2 & DESC_CS_MASK))
242 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
243 /* XXX: is this correct? */
244 if (dpl != rpl)
245 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
246 if ((e2 & DESC_C_MASK) && dpl > rpl)
247 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
248 } else if (seg_reg == R_SS) {
249 /* SS must be writable data */
250 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
251 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
252 if (dpl != cpl || dpl != rpl)
253 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
254 } else {
255 /* not readable code */
256 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
257 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
258 /* if data or non-conforming code, check the access rights */
259 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
260 if (dpl < cpl || dpl < rpl)
261 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
262 }
263 }
264 if (!(e2 & DESC_P_MASK))
265 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
266 cpu_x86_load_seg_cache(env, seg_reg, selector,
267 get_seg_base(e1, e2),
268 get_seg_limit(e1, e2),
269 e2);
270 } else {
271 if (seg_reg == R_SS || seg_reg == R_CS)
272 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
273 }
274 }
275
276 #define SWITCH_TSS_JMP 0
277 #define SWITCH_TSS_IRET 1
278 #define SWITCH_TSS_CALL 2
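/* The task-switch source matters below: JMP and IRET clear the busy bit of
 * the outgoing TSS, JMP and CALL set it on the incoming one, and CALL also
 * stores a back link to the old TSS selector and sets NT in the new EFLAGS.
 */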
279
280 /* XXX: restore CPU state in registers (PowerPC case) */
281 static void switch_tss(int tss_selector,
282 uint32_t e1, uint32_t e2, int source,
283 uint32_t next_eip)
284 {
285 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
286 target_ulong tss_base;
287 uint32_t new_regs[8], new_segs[6];
288 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
289 uint32_t old_eflags, eflags_mask;
290 SegmentCache *dt;
291 int index;
292 target_ulong ptr;
293
294 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
295 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
296
297 /* if task gate, we read the TSS segment and we load it */
298 if (type == 5) {
299 if (!(e2 & DESC_P_MASK))
300 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
301 tss_selector = e1 >> 16;
302 if (tss_selector & 4)
303 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
304 if (load_segment(&e1, &e2, tss_selector) != 0)
305 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
306 if (e2 & DESC_S_MASK)
307 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
308 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
309 if ((type & 7) != 1)
310 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
311 }
312
313 if (!(e2 & DESC_P_MASK))
314 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
315
316 if (type & 8)
317 tss_limit_max = 103;
318 else
319 tss_limit_max = 43;
320 tss_limit = get_seg_limit(e1, e2);
321 tss_base = get_seg_base(e1, e2);
322 if ((tss_selector & 4) != 0 ||
323 tss_limit < tss_limit_max)
324 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
325 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
326 if (old_type & 8)
327 old_tss_limit_max = 103;
328 else
329 old_tss_limit_max = 43;
330
331 /* read all the registers from the new TSS */
332 if (type & 8) {
333 /* 32 bit */
334 new_cr3 = ldl_kernel(tss_base + 0x1c);
335 new_eip = ldl_kernel(tss_base + 0x20);
336 new_eflags = ldl_kernel(tss_base + 0x24);
337 for(i = 0; i < 8; i++)
338 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
339 for(i = 0; i < 6; i++)
340 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
341 new_ldt = lduw_kernel(tss_base + 0x60);
342 new_trap = ldl_kernel(tss_base + 0x64);
343 } else {
344 /* 16 bit */
345 new_cr3 = 0;
346 new_eip = lduw_kernel(tss_base + 0x0e);
347 new_eflags = lduw_kernel(tss_base + 0x10);
348 for(i = 0; i < 8; i++)
349 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
350 for(i = 0; i < 4; i++)
351 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
352 new_ldt = lduw_kernel(tss_base + 0x2a);
353 new_segs[R_FS] = 0;
354 new_segs[R_GS] = 0;
355 new_trap = 0;
356 }
357 /* XXX: avoid a compiler warning, see
358 http://support.amd.com/us/Processor_TechDocs/24593.pdf
359 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
360 (void)new_trap;
361
362 /* NOTE: we must avoid memory exceptions during the task switch,
363 so we make dummy accesses before */
364 /* XXX: it can still fail in some cases, so a bigger hack is
365 necessary to validate the TLB after having done the accesses */
366
367 v1 = ldub_kernel(env->tr.base);
368 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
369 stb_kernel(env->tr.base, v1);
370 stb_kernel(env->tr.base + old_tss_limit_max, v2);
371
372 /* clear busy bit (it is restartable) */
373 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
374 target_ulong ptr;
375 uint32_t e2;
376 ptr = env->gdt.base + (env->tr.selector & ~7);
377 e2 = ldl_kernel(ptr + 4);
378 e2 &= ~DESC_TSS_BUSY_MASK;
379 stl_kernel(ptr + 4, e2);
380 }
381 old_eflags = compute_eflags();
382 if (source == SWITCH_TSS_IRET)
383 old_eflags &= ~NT_MASK;
384
385 /* save the current state in the old TSS */
386 if (type & 8) {
387 /* 32 bit */
388 stl_kernel(env->tr.base + 0x20, next_eip);
389 stl_kernel(env->tr.base + 0x24, old_eflags);
390 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
391 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
392 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
393 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
394 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
395 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
396 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
397 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
398 for(i = 0; i < 6; i++)
399 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
400 } else {
401 /* 16 bit */
402 stw_kernel(env->tr.base + 0x0e, next_eip);
403 stw_kernel(env->tr.base + 0x10, old_eflags);
404 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
405 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
406 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
407 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
408 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
409 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
410 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
411 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
412 for(i = 0; i < 4; i++)
413 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
414 }
415
416 /* now if an exception occurs, it will occur in the next task
417 context */
418
419 if (source == SWITCH_TSS_CALL) {
420 stw_kernel(tss_base, env->tr.selector);
421 new_eflags |= NT_MASK;
422 }
423
424 /* set busy bit */
425 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
426 target_ulong ptr;
427 uint32_t e2;
428 ptr = env->gdt.base + (tss_selector & ~7);
429 e2 = ldl_kernel(ptr + 4);
430 e2 |= DESC_TSS_BUSY_MASK;
431 stl_kernel(ptr + 4, e2);
432 }
433
434 /* set the new CPU state */
435 /* from this point, any exception which occurs can give problems */
436 env->cr[0] |= CR0_TS_MASK;
437 env->hflags |= HF_TS_MASK;
438 env->tr.selector = tss_selector;
439 env->tr.base = tss_base;
440 env->tr.limit = tss_limit;
441 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
442
443 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
444 cpu_x86_update_cr3(env, new_cr3);
445 }
446
447 /* first load all registers without raising exceptions, then reload
448 them with possible exceptions */
449 env->eip = new_eip;
450 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
451 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
452 if (!(type & 8))
453 eflags_mask &= 0xffff;
454 load_eflags(new_eflags, eflags_mask);
455 /* XXX: what to do in 16 bit case ? */
456 EAX = new_regs[0];
457 ECX = new_regs[1];
458 EDX = new_regs[2];
459 EBX = new_regs[3];
460 ESP = new_regs[4];
461 EBP = new_regs[5];
462 ESI = new_regs[6];
463 EDI = new_regs[7];
464 if (new_eflags & VM_MASK) {
465 for(i = 0; i < 6; i++)
466 load_seg_vm(i, new_segs[i]);
467 /* in vm86, CPL is always 3 */
468 cpu_x86_set_cpl(env, 3);
469 } else {
470 /* CPL is set to the RPL of CS */
471 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
472 /* first load just the selectors, as the rest may trigger exceptions */
473 for(i = 0; i < 6; i++)
474 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
475 }
476
477 env->ldt.selector = new_ldt & ~4;
478 env->ldt.base = 0;
479 env->ldt.limit = 0;
480 env->ldt.flags = 0;
481
482 /* load the LDT */
483 if (new_ldt & 4)
484 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
485
486 if ((new_ldt & 0xfffc) != 0) {
487 dt = &env->gdt;
488 index = new_ldt & ~7;
489 if ((index + 7) > dt->limit)
490 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
491 ptr = dt->base + index;
492 e1 = ldl_kernel(ptr);
493 e2 = ldl_kernel(ptr + 4);
494 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
495 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
496 if (!(e2 & DESC_P_MASK))
497 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
498 load_seg_cache_raw_dt(&env->ldt, e1, e2);
499 }
500
501 /* load the segments */
502 if (!(new_eflags & VM_MASK)) {
503 tss_load_seg(R_CS, new_segs[R_CS]);
504 tss_load_seg(R_SS, new_segs[R_SS]);
505 tss_load_seg(R_ES, new_segs[R_ES]);
506 tss_load_seg(R_DS, new_segs[R_DS]);
507 tss_load_seg(R_FS, new_segs[R_FS]);
508 tss_load_seg(R_GS, new_segs[R_GS]);
509 }
510
511 /* check that EIP is in the CS segment limits */
512 if (new_eip > env->segs[R_CS].limit) {
513 /* XXX: different exception if CALL ? */
514 raise_exception_err(EXCP0D_GPF, 0);
515 }
516
517 #ifndef CONFIG_USER_ONLY
518 /* reset local breakpoints */
519 if (env->dr[7] & 0x55) {
520 for (i = 0; i < 4; i++) {
521 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
522 hw_breakpoint_remove(env, i);
523 }
524 env->dr[7] &= ~0x55;
525 }
526 #endif
527 }
528
529 /* check if port I/O is allowed by the TSS I/O permission bitmap */
530 static inline void check_io(int addr, int size)
531 {
532 int io_offset, val, mask;
533
534 /* TSS must be a valid 32 bit one */
535 if (!(env->tr.flags & DESC_P_MASK) ||
536 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
537 env->tr.limit < 103)
538 goto fail;
539 io_offset = lduw_kernel(env->tr.base + 0x66);
540 io_offset += (addr >> 3);
541 /* Note: the check needs two bytes */
542 if ((io_offset + 1) > env->tr.limit)
543 goto fail;
544 val = lduw_kernel(env->tr.base + io_offset);
545 val >>= (addr & 7);
546 mask = (1 << size) - 1;
547 /* all bits must be zero to allow the I/O */
548 if ((val & mask) != 0) {
549 fail:
550 raise_exception_err(EXCP0D_GPF, 0);
551 }
552 }
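/* Worked example: a 1-byte access to port 0x3f9 reads the 16-bit word at
 * tr.base + io_offset + (0x3f9 >> 3), shifts it right by (0x3f9 & 7) = 1
 * and tests the low bit; any set bit covering the accessed ports means the
 * I/O is denied and #GP(0) is raised.
 */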
553
554 void helper_check_iob(uint32_t t0)
555 {
556 check_io(t0, 1);
557 }
558
559 void helper_check_iow(uint32_t t0)
560 {
561 check_io(t0, 2);
562 }
563
564 void helper_check_iol(uint32_t t0)
565 {
566 check_io(t0, 4);
567 }
568
569 void helper_outb(uint32_t port, uint32_t data)
570 {
571 cpu_outb(port, data & 0xff);
572 }
573
574 target_ulong helper_inb(uint32_t port)
575 {
576 return cpu_inb(port);
577 }
578
579 void helper_outw(uint32_t port, uint32_t data)
580 {
581 cpu_outw(port, data & 0xffff);
582 }
583
584 target_ulong helper_inw(uint32_t port)
585 {
586 return cpu_inw(port);
587 }
588
589 void helper_outl(uint32_t port, uint32_t data)
590 {
591 cpu_outl(port, data);
592 }
593
594 target_ulong helper_inl(uint32_t port)
595 {
596 return cpu_inl(port);
597 }
598
599 static inline unsigned int get_sp_mask(unsigned int e2)
600 {
601 if (e2 & DESC_B_MASK)
602 return 0xffffffff;
603 else
604 return 0xffff;
605 }
606
607 static int exeption_has_error_code(int intno)
608 {
609 switch(intno) {
610 case 8:
611 case 10:
612 case 11:
613 case 12:
614 case 13:
615 case 14:
616 case 17:
617 return 1;
618 }
619 return 0;
620 }
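/* The vectors listed above are #DF, #TS, #NP, #SS, #GP, #PF and #AC, i.e.
 * the exceptions for which the CPU pushes an error code on the stack.
 */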
621
622 #ifdef TARGET_X86_64
623 #define SET_ESP(val, sp_mask)\
624 do {\
625 if ((sp_mask) == 0xffff)\
626 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
627 else if ((sp_mask) == 0xffffffffLL)\
628 ESP = (uint32_t)(val);\
629 else\
630 ESP = (val);\
631 } while (0)
632 #else
633 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
634 #endif
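/* SET_ESP mimics how stack pointer writes behave for the current stack
 * size: with a 16-bit stack (sp_mask == 0xffff) only SP is updated and the
 * upper bits are preserved, with a 32-bit stack the value is zero-extended,
 * and otherwise the full value is written.
 */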
635
636 /* on 64-bit targets the segment base + offset addition can overflow, so
637 * this segment addition macro trims the value to 32 bits whenever needed */
638 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
639
640 /* XXX: add an is_user flag to have proper security support */
641 #define PUSHW(ssp, sp, sp_mask, val)\
642 {\
643 sp -= 2;\
644 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
645 }
646
647 #define PUSHL(ssp, sp, sp_mask, val)\
648 {\
649 sp -= 4;\
650 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
651 }
652
653 #define POPW(ssp, sp, sp_mask, val)\
654 {\
655 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
656 sp += 2;\
657 }
658
659 #define POPL(ssp, sp, sp_mask, val)\
660 {\
661 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
662 sp += 4;\
663 }
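/* The PUSH/POP macros above operate on a caller-local copy of the stack
 * pointer and use the kernel-privilege load/store helpers; callers commit
 * the final value with SET_ESP() only after all accesses have succeeded,
 * so a fault part-way through does not leave ESP half-updated.
 */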
664
665 /* protected mode interrupt */
666 static void do_interrupt_protected(int intno, int is_int, int error_code,
667 unsigned int next_eip, int is_hw)
668 {
669 SegmentCache *dt;
670 target_ulong ptr, ssp;
671 int type, dpl, selector, ss_dpl, cpl;
672 int has_error_code, new_stack, shift;
673 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
674 uint32_t old_eip, sp_mask;
675
676 has_error_code = 0;
677 if (!is_int && !is_hw)
678 has_error_code = exeption_has_error_code(intno);
679 if (is_int)
680 old_eip = next_eip;
681 else
682 old_eip = env->eip;
683
684 dt = &env->idt;
685 if (intno * 8 + 7 > dt->limit)
686 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
687 ptr = dt->base + intno * 8;
688 e1 = ldl_kernel(ptr);
689 e2 = ldl_kernel(ptr + 4);
690 /* check gate type */
691 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
692 switch(type) {
693 case 5: /* task gate */
694 /* must do that check here to return the correct error code */
695 if (!(e2 & DESC_P_MASK))
696 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
697 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
698 if (has_error_code) {
699 int type;
700 uint32_t mask;
701 /* push the error code */
702 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
703 shift = type >> 3;
704 if (env->segs[R_SS].flags & DESC_B_MASK)
705 mask = 0xffffffff;
706 else
707 mask = 0xffff;
708 esp = (ESP - (2 << shift)) & mask;
709 ssp = env->segs[R_SS].base + esp;
710 if (shift)
711 stl_kernel(ssp, error_code);
712 else
713 stw_kernel(ssp, error_code);
714 SET_ESP(esp, mask);
715 }
716 return;
717 case 6: /* 286 interrupt gate */
718 case 7: /* 286 trap gate */
719 case 14: /* 386 interrupt gate */
720 case 15: /* 386 trap gate */
721 break;
722 default:
723 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
724 break;
725 }
726 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
727 cpl = env->hflags & HF_CPL_MASK;
728 /* check privilege if software int */
729 if (is_int && dpl < cpl)
730 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
731 /* check valid bit */
732 if (!(e2 & DESC_P_MASK))
733 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
734 selector = e1 >> 16;
735 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
736 if ((selector & 0xfffc) == 0)
737 raise_exception_err(EXCP0D_GPF, 0);
738
739 if (load_segment(&e1, &e2, selector) != 0)
740 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
741 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
742 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
743 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
744 if (dpl > cpl)
745 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
746 if (!(e2 & DESC_P_MASK))
747 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
748 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
749 /* to inner privilege */
750 get_ss_esp_from_tss(&ss, &esp, dpl);
751 if ((ss & 0xfffc) == 0)
752 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
753 if ((ss & 3) != dpl)
754 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
755 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
756 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
757 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
758 if (ss_dpl != dpl)
759 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
760 if (!(ss_e2 & DESC_S_MASK) ||
761 (ss_e2 & DESC_CS_MASK) ||
762 !(ss_e2 & DESC_W_MASK))
763 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
764 if (!(ss_e2 & DESC_P_MASK))
765 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
766 new_stack = 1;
767 sp_mask = get_sp_mask(ss_e2);
768 ssp = get_seg_base(ss_e1, ss_e2);
769 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
770 /* to same privilege */
771 if (env->eflags & VM_MASK)
772 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
773 new_stack = 0;
774 sp_mask = get_sp_mask(env->segs[R_SS].flags);
775 ssp = env->segs[R_SS].base;
776 esp = ESP;
777 dpl = cpl;
778 } else {
779 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
780 new_stack = 0; /* avoid warning */
781 sp_mask = 0; /* avoid warning */
782 ssp = 0; /* avoid warning */
783 esp = 0; /* avoid warning */
784 }
785
786 shift = type >> 3;
787
788 #if 0
789 /* XXX: check that enough room is available */
790 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
791 if (env->eflags & VM_MASK)
792 push_size += 8;
793 push_size <<= shift;
794 #endif
795 if (shift == 1) {
796 if (new_stack) {
797 if (env->eflags & VM_MASK) {
798 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
799 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
800 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
801 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
802 }
803 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
804 PUSHL(ssp, esp, sp_mask, ESP);
805 }
806 PUSHL(ssp, esp, sp_mask, compute_eflags());
807 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
808 PUSHL(ssp, esp, sp_mask, old_eip);
809 if (has_error_code) {
810 PUSHL(ssp, esp, sp_mask, error_code);
811 }
812 } else {
813 if (new_stack) {
814 if (env->eflags & VM_MASK) {
815 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
816 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
817 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
818 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
819 }
820 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
821 PUSHW(ssp, esp, sp_mask, ESP);
822 }
823 PUSHW(ssp, esp, sp_mask, compute_eflags());
824 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
825 PUSHW(ssp, esp, sp_mask, old_eip);
826 if (has_error_code) {
827 PUSHW(ssp, esp, sp_mask, error_code);
828 }
829 }
830
831 if (new_stack) {
832 if (env->eflags & VM_MASK) {
833 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
834 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
835 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
836 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
837 }
838 ss = (ss & ~3) | dpl;
839 cpu_x86_load_seg_cache(env, R_SS, ss,
840 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
841 }
842 SET_ESP(esp, sp_mask);
843
844 selector = (selector & ~3) | dpl;
845 cpu_x86_load_seg_cache(env, R_CS, selector,
846 get_seg_base(e1, e2),
847 get_seg_limit(e1, e2),
848 e2);
849 cpu_x86_set_cpl(env, dpl);
850 env->eip = offset;
851
852 /* interrupt gates clear the IF flag (trap gates do not) */
853 if ((type & 1) == 0) {
854 env->eflags &= ~IF_MASK;
855 }
856 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
857 }
858
859 #ifdef TARGET_X86_64
860
861 #define PUSHQ(sp, val)\
862 {\
863 sp -= 8;\
864 stq_kernel(sp, (val));\
865 }
866
867 #define POPQ(sp, val)\
868 {\
869 val = ldq_kernel(sp);\
870 sp += 8;\
871 }
872
873 static inline target_ulong get_rsp_from_tss(int level)
874 {
875 int index;
876
877 #if 0
878 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
879 env->tr.base, env->tr.limit);
880 #endif
881
882 if (!(env->tr.flags & DESC_P_MASK))
883 cpu_abort(env, "invalid tss");
884 index = 8 * level + 4;
885 if ((index + 7) > env->tr.limit)
886 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
887 return ldq_kernel(env->tr.base + index);
888 }
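/* In the 64-bit TSS, RSP0..RSP2 are stored at offsets 4, 12 and 20 and
 * IST1..IST7 at offsets 36..84, which is why callers pass either the target
 * DPL (0..2) or ist + 3 as the level (index = 8 * level + 4).
 */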
889
890 /* 64 bit interrupt */
891 static void do_interrupt64(int intno, int is_int, int error_code,
892 target_ulong next_eip, int is_hw)
893 {
894 SegmentCache *dt;
895 target_ulong ptr;
896 int type, dpl, selector, cpl, ist;
897 int has_error_code, new_stack;
898 uint32_t e1, e2, e3, ss;
899 target_ulong old_eip, esp, offset;
900
901 has_error_code = 0;
902 if (!is_int && !is_hw)
903 has_error_code = exeption_has_error_code(intno);
904 if (is_int)
905 old_eip = next_eip;
906 else
907 old_eip = env->eip;
908
909 dt = &env->idt;
910 if (intno * 16 + 15 > dt->limit)
911 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
912 ptr = dt->base + intno * 16;
913 e1 = ldl_kernel(ptr);
914 e2 = ldl_kernel(ptr + 4);
915 e3 = ldl_kernel(ptr + 8);
916 /* check gate type */
917 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
918 switch(type) {
919 case 14: /* 386 interrupt gate */
920 case 15: /* 386 trap gate */
921 break;
922 default:
923 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
924 break;
925 }
926 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
927 cpl = env->hflags & HF_CPL_MASK;
928 /* check privilege if software int */
929 if (is_int && dpl < cpl)
930 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
931 /* check valid bit */
932 if (!(e2 & DESC_P_MASK))
933 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
934 selector = e1 >> 16;
935 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
936 ist = e2 & 7;
937 if ((selector & 0xfffc) == 0)
938 raise_exception_err(EXCP0D_GPF, 0);
939
940 if (load_segment(&e1, &e2, selector) != 0)
941 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
942 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
943 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
944 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
945 if (dpl > cpl)
946 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
947 if (!(e2 & DESC_P_MASK))
948 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
949 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
950 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
951 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
952 /* to inner privilege */
953 if (ist != 0)
954 esp = get_rsp_from_tss(ist + 3);
955 else
956 esp = get_rsp_from_tss(dpl);
957 esp &= ~0xfLL; /* align stack */
958 ss = 0;
959 new_stack = 1;
960 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
961 /* to same privilege */
962 if (env->eflags & VM_MASK)
963 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
964 new_stack = 0;
965 if (ist != 0)
966 esp = get_rsp_from_tss(ist + 3);
967 else
968 esp = ESP;
969 esp &= ~0xfLL; /* align stack */
970 dpl = cpl;
971 } else {
972 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
973 new_stack = 0; /* avoid warning */
974 esp = 0; /* avoid warning */
975 }
976
977 PUSHQ(esp, env->segs[R_SS].selector);
978 PUSHQ(esp, ESP);
979 PUSHQ(esp, compute_eflags());
980 PUSHQ(esp, env->segs[R_CS].selector);
981 PUSHQ(esp, old_eip);
982 if (has_error_code) {
983 PUSHQ(esp, error_code);
984 }
985
986 if (new_stack) {
987 ss = 0 | dpl;
988 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
989 }
990 ESP = esp;
991
992 selector = (selector & ~3) | dpl;
993 cpu_x86_load_seg_cache(env, R_CS, selector,
994 get_seg_base(e1, e2),
995 get_seg_limit(e1, e2),
996 e2);
997 cpu_x86_set_cpl(env, dpl);
998 env->eip = offset;
999
1000 /* interrupt gates clear the IF flag (trap gates do not) */
1001 if ((type & 1) == 0) {
1002 env->eflags &= ~IF_MASK;
1003 }
1004 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1005 }
1006 #endif
1007
1008 #ifdef TARGET_X86_64
1009 #if defined(CONFIG_USER_ONLY)
1010 void helper_syscall(int next_eip_addend)
1011 {
1012 env->exception_index = EXCP_SYSCALL;
1013 env->exception_next_eip = env->eip + next_eip_addend;
1014 cpu_loop_exit();
1015 }
1016 #else
1017 void helper_syscall(int next_eip_addend)
1018 {
1019 int selector;
1020
1021 if (!(env->efer & MSR_EFER_SCE)) {
1022 raise_exception_err(EXCP06_ILLOP, 0);
1023 }
1024 selector = (env->star >> 32) & 0xffff;
1025 if (env->hflags & HF_LMA_MASK) {
1026 int code64;
1027
1028 ECX = env->eip + next_eip_addend;
1029 env->regs[11] = compute_eflags();
1030
1031 code64 = env->hflags & HF_CS64_MASK;
1032
1033 cpu_x86_set_cpl(env, 0);
1034 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1035 0, 0xffffffff,
1036 DESC_G_MASK | DESC_P_MASK |
1037 DESC_S_MASK |
1038 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1039 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1040 0, 0xffffffff,
1041 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1042 DESC_S_MASK |
1043 DESC_W_MASK | DESC_A_MASK);
1044 env->eflags &= ~env->fmask;
1045 load_eflags(env->eflags, 0);
1046 if (code64)
1047 env->eip = env->lstar;
1048 else
1049 env->eip = env->cstar;
1050 } else {
1051 ECX = (uint32_t)(env->eip + next_eip_addend);
1052
1053 cpu_x86_set_cpl(env, 0);
1054 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1055 0, 0xffffffff,
1056 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1057 DESC_S_MASK |
1058 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1059 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1060 0, 0xffffffff,
1061 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1062 DESC_S_MASK |
1063 DESC_W_MASK | DESC_A_MASK);
1064 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1065 env->eip = (uint32_t)env->star;
1066 }
1067 }
1068 #endif
1069 #endif
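/* Summary of the SYSCALL path above: CS is loaded from STAR[47:32] and SS
 * from that selector + 8 as flat segments, and the return address goes to
 * (R/E)CX.  In long mode RFLAGS is additionally saved to R11 and masked
 * with SFMASK (env->fmask), and RIP comes from LSTAR or CSTAR depending on
 * whether the caller was executing 64-bit code; in legacy mode IF/VM/RF
 * are cleared and EIP comes from STAR[31:0].
 */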
1070
1071 #ifdef TARGET_X86_64
1072 void helper_sysret(int dflag)
1073 {
1074 int cpl, selector;
1075
1076 if (!(env->efer & MSR_EFER_SCE)) {
1077 raise_exception_err(EXCP06_ILLOP, 0);
1078 }
1079 cpl = env->hflags & HF_CPL_MASK;
1080 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1081 raise_exception_err(EXCP0D_GPF, 0);
1082 }
1083 selector = (env->star >> 48) & 0xffff;
1084 if (env->hflags & HF_LMA_MASK) {
1085 if (dflag == 2) {
1086 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1087 0, 0xffffffff,
1088 DESC_G_MASK | DESC_P_MASK |
1089 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1090 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1091 DESC_L_MASK);
1092 env->eip = ECX;
1093 } else {
1094 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1095 0, 0xffffffff,
1096 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1097 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1098 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1099 env->eip = (uint32_t)ECX;
1100 }
1101 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1102 0, 0xffffffff,
1103 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1104 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1105 DESC_W_MASK | DESC_A_MASK);
1106 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1107 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1108 cpu_x86_set_cpl(env, 3);
1109 } else {
1110 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1111 0, 0xffffffff,
1112 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1113 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1114 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1115 env->eip = (uint32_t)ECX;
1116 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1117 0, 0xffffffff,
1118 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1119 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1120 DESC_W_MASK | DESC_A_MASK);
1121 env->eflags |= IF_MASK;
1122 cpu_x86_set_cpl(env, 3);
1123 }
1124 }
1125 #endif
1126
1127 /* real mode interrupt */
1128 static void do_interrupt_real(int intno, int is_int, int error_code,
1129 unsigned int next_eip)
1130 {
1131 SegmentCache *dt;
1132 target_ulong ptr, ssp;
1133 int selector;
1134 uint32_t offset, esp;
1135 uint32_t old_cs, old_eip;
1136
1137 /* real mode (simpler !) */
1138 dt = &env->idt;
1139 if (intno * 4 + 3 > dt->limit)
1140 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1141 ptr = dt->base + intno * 4;
1142 offset = lduw_kernel(ptr);
1143 selector = lduw_kernel(ptr + 2);
1144 esp = ESP;
1145 ssp = env->segs[R_SS].base;
1146 if (is_int)
1147 old_eip = next_eip;
1148 else
1149 old_eip = env->eip;
1150 old_cs = env->segs[R_CS].selector;
1151 /* XXX: use SS segment size ? */
1152 PUSHW(ssp, esp, 0xffff, compute_eflags());
1153 PUSHW(ssp, esp, 0xffff, old_cs);
1154 PUSHW(ssp, esp, 0xffff, old_eip);
1155
1156 /* update processor state */
1157 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1158 env->eip = offset;
1159 env->segs[R_CS].selector = selector;
1160 env->segs[R_CS].base = (selector << 4);
1161 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1162 }
1163
1164 /* fake user mode interrupt */
1165 void do_interrupt_user(int intno, int is_int, int error_code,
1166 target_ulong next_eip)
1167 {
1168 SegmentCache *dt;
1169 target_ulong ptr;
1170 int dpl, cpl, shift;
1171 uint32_t e2;
1172
1173 dt = &env->idt;
1174 if (env->hflags & HF_LMA_MASK) {
1175 shift = 4;
1176 } else {
1177 shift = 3;
1178 }
1179 ptr = dt->base + (intno << shift);
1180 e2 = ldl_kernel(ptr + 4);
1181
1182 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1183 cpl = env->hflags & HF_CPL_MASK;
1184 /* check privilege if software int */
1185 if (is_int && dpl < cpl)
1186 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1187
1188 /* Since we emulate only user space, we cannot do more than
1189 exiting the emulation with the suitable exception and error
1190 code */
1191 if (is_int)
1192 EIP = next_eip;
1193 }
1194
1195 #if !defined(CONFIG_USER_ONLY)
1196 static void handle_even_inj(int intno, int is_int, int error_code,
1197 int is_hw, int rm)
1198 {
1199 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1200 if (!(event_inj & SVM_EVTINJ_VALID)) {
1201 int type;
1202 if (is_int)
1203 type = SVM_EVTINJ_TYPE_SOFT;
1204 else
1205 type = SVM_EVTINJ_TYPE_EXEPT;
1206 event_inj = intno | type | SVM_EVTINJ_VALID;
1207 if (!rm && exeption_has_error_code(intno)) {
1208 event_inj |= SVM_EVTINJ_VALID_ERR;
1209 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1210 }
1211 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1212 }
1213 }
1214 #endif
1215
1216 /*
1217 * Begin execution of an interrupt. is_int is TRUE if coming from
1218 * the int instruction. next_eip is the EIP value AFTER the interrupt
1219 * instruction. It is only relevant if is_int is TRUE.
1220 */
1221 void do_interrupt(int intno, int is_int, int error_code,
1222 target_ulong next_eip, int is_hw)
1223 {
1224 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1225 if ((env->cr[0] & CR0_PE_MASK)) {
1226 static int count;
1227 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1228 count, intno, error_code, is_int,
1229 env->hflags & HF_CPL_MASK,
1230 env->segs[R_CS].selector, EIP,
1231 (int)env->segs[R_CS].base + EIP,
1232 env->segs[R_SS].selector, ESP);
1233 if (intno == 0x0e) {
1234 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1235 } else {
1236 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1237 }
1238 qemu_log("\n");
1239 log_cpu_state(env, X86_DUMP_CCOP);
1240 #if 0
1241 {
1242 int i;
1243 target_ulong ptr;
1244 qemu_log(" code=");
1245 ptr = env->segs[R_CS].base + env->eip;
1246 for(i = 0; i < 16; i++) {
1247 qemu_log(" %02x", ldub(ptr + i));
1248 }
1249 qemu_log("\n");
1250 }
1251 #endif
1252 count++;
1253 }
1254 }
1255 if (env->cr[0] & CR0_PE_MASK) {
1256 #if !defined(CONFIG_USER_ONLY)
1257 if (env->hflags & HF_SVMI_MASK)
1258 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1259 #endif
1260 #ifdef TARGET_X86_64
1261 if (env->hflags & HF_LMA_MASK) {
1262 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1263 } else
1264 #endif
1265 {
1266 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1267 }
1268 } else {
1269 #if !defined(CONFIG_USER_ONLY)
1270 if (env->hflags & HF_SVMI_MASK)
1271 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1272 #endif
1273 do_interrupt_real(intno, is_int, error_code, next_eip);
1274 }
1275
1276 #if !defined(CONFIG_USER_ONLY)
1277 if (env->hflags & HF_SVMI_MASK) {
1278 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1279 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1280 }
1281 #endif
1282 }
1283
1284 /* This should come from sysemu.h - if we could include it here... */
1285 void qemu_system_reset_request(void);
1286
1287 /*
1288 * Check nested exceptions and change to double or triple fault if
1289 * needed. It should only be called if this is not an interrupt.
1290 * Returns the new exception number.
1291 */
1292 static int check_exception(int intno, int *error_code)
1293 {
1294 int first_contributory = env->old_exception == 0 ||
1295 (env->old_exception >= 10 &&
1296 env->old_exception <= 13);
1297 int second_contributory = intno == 0 ||
1298 (intno >= 10 && intno <= 13);
1299
1300 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1301 env->old_exception, intno);
1302
1303 #if !defined(CONFIG_USER_ONLY)
1304 if (env->old_exception == EXCP08_DBLE) {
1305 if (env->hflags & HF_SVMI_MASK)
1306 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1307
1308 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1309
1310 qemu_system_reset_request();
1311 return EXCP_HLT;
1312 }
1313 #endif
1314
1315 if ((first_contributory && second_contributory)
1316 || (env->old_exception == EXCP0E_PAGE &&
1317 (second_contributory || (intno == EXCP0E_PAGE)))) {
1318 intno = EXCP08_DBLE;
1319 *error_code = 0;
1320 }
1321
1322 if (second_contributory || (intno == EXCP0E_PAGE) ||
1323 (intno == EXCP08_DBLE))
1324 env->old_exception = intno;
1325
1326 return intno;
1327 }
1328
1329 /*
1330 * Signal an interrupt. It is executed in the main CPU loop.
1331 * is_int is TRUE if coming from the int instruction. next_eip is the
1332 * EIP value AFTER the interrupt instruction. It is only relevant if
1333 * is_int is TRUE.
1334 */
1335 static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1336 int next_eip_addend)
1337 {
1338 if (!is_int) {
1339 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1340 intno = check_exception(intno, &error_code);
1341 } else {
1342 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1343 }
1344
1345 env->exception_index = intno;
1346 env->error_code = error_code;
1347 env->exception_is_int = is_int;
1348 env->exception_next_eip = env->eip + next_eip_addend;
1349 cpu_loop_exit();
1350 }
1351
1352 /* shortcuts to generate exceptions */
1353
1354 void raise_exception_err(int exception_index, int error_code)
1355 {
1356 raise_interrupt(exception_index, 0, error_code, 0);
1357 }
1358
1359 void raise_exception(int exception_index)
1360 {
1361 raise_interrupt(exception_index, 0, 0, 0);
1362 }
1363
1364 void raise_exception_env(int exception_index, CPUState *nenv)
1365 {
1366 env = nenv;
1367 raise_exception(exception_index);
1368 }
1369 /* SMM support */
1370
1371 #if defined(CONFIG_USER_ONLY)
1372
1373 void do_smm_enter(void)
1374 {
1375 }
1376
1377 void helper_rsm(void)
1378 {
1379 }
1380
1381 #else
1382
1383 #ifdef TARGET_X86_64
1384 #define SMM_REVISION_ID 0x00020064
1385 #else
1386 #define SMM_REVISION_ID 0x00020000
1387 #endif
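/* The low bits of the SMM revision identifier give the save-state revision
 * and bit 17 (0x20000) advertises SMBASE relocation support, which is why
 * helper_rsm() below only reloads env->smbase when that bit is set in the
 * saved revision field.
 */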
1388
1389 void do_smm_enter(void)
1390 {
1391 target_ulong sm_state;
1392 SegmentCache *dt;
1393 int i, offset;
1394
1395 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1396 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1397
1398 env->hflags |= HF_SMM_MASK;
1399 cpu_smm_update(env);
1400
1401 sm_state = env->smbase + 0x8000;
1402
1403 #ifdef TARGET_X86_64
1404 for(i = 0; i < 6; i++) {
1405 dt = &env->segs[i];
1406 offset = 0x7e00 + i * 16;
1407 stw_phys(sm_state + offset, dt->selector);
1408 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1409 stl_phys(sm_state + offset + 4, dt->limit);
1410 stq_phys(sm_state + offset + 8, dt->base);
1411 }
1412
1413 stq_phys(sm_state + 0x7e68, env->gdt.base);
1414 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1415
1416 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1417 stq_phys(sm_state + 0x7e78, env->ldt.base);
1418 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1419 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1420
1421 stq_phys(sm_state + 0x7e88, env->idt.base);
1422 stl_phys(sm_state + 0x7e84, env->idt.limit);
1423
1424 stw_phys(sm_state + 0x7e90, env->tr.selector);
1425 stq_phys(sm_state + 0x7e98, env->tr.base);
1426 stl_phys(sm_state + 0x7e94, env->tr.limit);
1427 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1428
1429 stq_phys(sm_state + 0x7ed0, env->efer);
1430
1431 stq_phys(sm_state + 0x7ff8, EAX);
1432 stq_phys(sm_state + 0x7ff0, ECX);
1433 stq_phys(sm_state + 0x7fe8, EDX);
1434 stq_phys(sm_state + 0x7fe0, EBX);
1435 stq_phys(sm_state + 0x7fd8, ESP);
1436 stq_phys(sm_state + 0x7fd0, EBP);
1437 stq_phys(sm_state + 0x7fc8, ESI);
1438 stq_phys(sm_state + 0x7fc0, EDI);
1439 for(i = 8; i < 16; i++)
1440 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1441 stq_phys(sm_state + 0x7f78, env->eip);
1442 stl_phys(sm_state + 0x7f70, compute_eflags());
1443 stl_phys(sm_state + 0x7f68, env->dr[6]);
1444 stl_phys(sm_state + 0x7f60, env->dr[7]);
1445
1446 stl_phys(sm_state + 0x7f48, env->cr[4]);
1447 stl_phys(sm_state + 0x7f50, env->cr[3]);
1448 stl_phys(sm_state + 0x7f58, env->cr[0]);
1449
1450 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1451 stl_phys(sm_state + 0x7f00, env->smbase);
1452 #else
1453 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1454 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1455 stl_phys(sm_state + 0x7ff4, compute_eflags());
1456 stl_phys(sm_state + 0x7ff0, env->eip);
1457 stl_phys(sm_state + 0x7fec, EDI);
1458 stl_phys(sm_state + 0x7fe8, ESI);
1459 stl_phys(sm_state + 0x7fe4, EBP);
1460 stl_phys(sm_state + 0x7fe0, ESP);
1461 stl_phys(sm_state + 0x7fdc, EBX);
1462 stl_phys(sm_state + 0x7fd8, EDX);
1463 stl_phys(sm_state + 0x7fd4, ECX);
1464 stl_phys(sm_state + 0x7fd0, EAX);
1465 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1466 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1467
1468 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1469 stl_phys(sm_state + 0x7f64, env->tr.base);
1470 stl_phys(sm_state + 0x7f60, env->tr.limit);
1471 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1472
1473 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1474 stl_phys(sm_state + 0x7f80, env->ldt.base);
1475 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1476 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1477
1478 stl_phys(sm_state + 0x7f74, env->gdt.base);
1479 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1480
1481 stl_phys(sm_state + 0x7f58, env->idt.base);
1482 stl_phys(sm_state + 0x7f54, env->idt.limit);
1483
1484 for(i = 0; i < 6; i++) {
1485 dt = &env->segs[i];
1486 if (i < 3)
1487 offset = 0x7f84 + i * 12;
1488 else
1489 offset = 0x7f2c + (i - 3) * 12;
1490 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1491 stl_phys(sm_state + offset + 8, dt->base);
1492 stl_phys(sm_state + offset + 4, dt->limit);
1493 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1494 }
1495 stl_phys(sm_state + 0x7f14, env->cr[4]);
1496
1497 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1498 stl_phys(sm_state + 0x7ef8, env->smbase);
1499 #endif
1500 /* init SMM cpu state */
1501
1502 #ifdef TARGET_X86_64
1503 cpu_load_efer(env, 0);
1504 #endif
1505 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1506 env->eip = 0x00008000;
1507 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1508 0xffffffff, 0);
1509 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1510 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1511 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1512 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1513 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1514
1515 cpu_x86_update_cr0(env,
1516 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1517 cpu_x86_update_cr4(env, 0);
1518 env->dr[7] = 0x00000400;
1519 CC_OP = CC_OP_EFLAGS;
1520 }
1521
1522 void helper_rsm(void)
1523 {
1524 target_ulong sm_state;
1525 int i, offset;
1526 uint32_t val;
1527
1528 sm_state = env->smbase + 0x8000;
1529 #ifdef TARGET_X86_64
1530 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1531
1532 for(i = 0; i < 6; i++) {
1533 offset = 0x7e00 + i * 16;
1534 cpu_x86_load_seg_cache(env, i,
1535 lduw_phys(sm_state + offset),
1536 ldq_phys(sm_state + offset + 8),
1537 ldl_phys(sm_state + offset + 4),
1538 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1539 }
1540
1541 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1542 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1543
1544 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1545 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1546 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1547 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1548
1549 env->idt.base = ldq_phys(sm_state + 0x7e88);
1550 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1551
1552 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1553 env->tr.base = ldq_phys(sm_state + 0x7e98);
1554 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1555 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1556
1557 EAX = ldq_phys(sm_state + 0x7ff8);
1558 ECX = ldq_phys(sm_state + 0x7ff0);
1559 EDX = ldq_phys(sm_state + 0x7fe8);
1560 EBX = ldq_phys(sm_state + 0x7fe0);
1561 ESP = ldq_phys(sm_state + 0x7fd8);
1562 EBP = ldq_phys(sm_state + 0x7fd0);
1563 ESI = ldq_phys(sm_state + 0x7fc8);
1564 EDI = ldq_phys(sm_state + 0x7fc0);
1565 for(i = 8; i < 16; i++)
1566 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1567 env->eip = ldq_phys(sm_state + 0x7f78);
1568 load_eflags(ldl_phys(sm_state + 0x7f70),
1569 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1570 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1571 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1572
1573 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1574 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1575 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1576
1577 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1578 if (val & 0x20000) {
1579 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1580 }
1581 #else
1582 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1583 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1584 load_eflags(ldl_phys(sm_state + 0x7ff4),
1585 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1586 env->eip = ldl_phys(sm_state + 0x7ff0);
1587 EDI = ldl_phys(sm_state + 0x7fec);
1588 ESI = ldl_phys(sm_state + 0x7fe8);
1589 EBP = ldl_phys(sm_state + 0x7fe4);
1590 ESP = ldl_phys(sm_state + 0x7fe0);
1591 EBX = ldl_phys(sm_state + 0x7fdc);
1592 EDX = ldl_phys(sm_state + 0x7fd8);
1593 ECX = ldl_phys(sm_state + 0x7fd4);
1594 EAX = ldl_phys(sm_state + 0x7fd0);
1595 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1596 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1597
1598 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1599 env->tr.base = ldl_phys(sm_state + 0x7f64);
1600 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1601 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1602
1603 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1604 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1605 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1606 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1607
1608 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1609 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1610
1611 env->idt.base = ldl_phys(sm_state + 0x7f58);
1612 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1613
1614 for(i = 0; i < 6; i++) {
1615 if (i < 3)
1616 offset = 0x7f84 + i * 12;
1617 else
1618 offset = 0x7f2c + (i - 3) * 12;
1619 cpu_x86_load_seg_cache(env, i,
1620 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1621 ldl_phys(sm_state + offset + 8),
1622 ldl_phys(sm_state + offset + 4),
1623 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1624 }
1625 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1626
1627 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1628 if (val & 0x20000) {
1629 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1630 }
1631 #endif
1632 CC_OP = CC_OP_EFLAGS;
1633 env->hflags &= ~HF_SMM_MASK;
1634 cpu_smm_update(env);
1635
1636 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
1637 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1638 }
1639
1640 #endif /* !CONFIG_USER_ONLY */
1641
1642
1643 /* division, flags are undefined */
1644
1645 void helper_divb_AL(target_ulong t0)
1646 {
1647 unsigned int num, den, q, r;
1648
1649 num = (EAX & 0xffff);
1650 den = (t0 & 0xff);
1651 if (den == 0) {
1652 raise_exception(EXCP00_DIVZ);
1653 }
1654 q = (num / den);
1655 if (q > 0xff)
1656 raise_exception(EXCP00_DIVZ);
1657 q &= 0xff;
1658 r = (num % den) & 0xff;
1659 EAX = (EAX & ~0xffff) | (r << 8) | q;
1660 }
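/* Example: DIV with AX = 0x0101 (257) and a divisor of 0x10 gives q = 16,
 * r = 1, so AL becomes 0x10 and AH 0x01; a quotient that does not fit in
 * AL (e.g. 0x1000 / 0x0f) raises the same divide-error exception as a zero
 * divisor.
 */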
1661
1662 void helper_idivb_AL(target_ulong t0)
1663 {
1664 int num, den, q, r;
1665
1666 num = (int16_t)EAX;
1667 den = (int8_t)t0;
1668 if (den == 0) {
1669 raise_exception(EXCP00_DIVZ);
1670 }
1671 q = (num / den);
1672 if (q != (int8_t)q)
1673 raise_exception(EXCP00_DIVZ);
1674 q &= 0xff;
1675 r = (num % den) & 0xff;
1676 EAX = (EAX & ~0xffff) | (r << 8) | q;
1677 }
1678
1679 void helper_divw_AX(target_ulong t0)
1680 {
1681 unsigned int num, den, q, r;
1682
1683 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1684 den = (t0 & 0xffff);
1685 if (den == 0) {
1686 raise_exception(EXCP00_DIVZ);
1687 }
1688 q = (num / den);
1689 if (q > 0xffff)
1690 raise_exception(EXCP00_DIVZ);
1691 q &= 0xffff;
1692 r = (num % den) & 0xffff;
1693 EAX = (EAX & ~0xffff) | q;
1694 EDX = (EDX & ~0xffff) | r;
1695 }
1696
1697 void helper_idivw_AX(target_ulong t0)
1698 {
1699 int num, den, q, r;
1700
1701 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1702 den = (int16_t)t0;
1703 if (den == 0) {
1704 raise_exception(EXCP00_DIVZ);
1705 }
1706 q = (num / den);
1707 if (q != (int16_t)q)
1708 raise_exception(EXCP00_DIVZ);
1709 q &= 0xffff;
1710 r = (num % den) & 0xffff;
1711 EAX = (EAX & ~0xffff) | q;
1712 EDX = (EDX & ~0xffff) | r;
1713 }
1714
1715 void helper_divl_EAX(target_ulong t0)
1716 {
1717 unsigned int den, r;
1718 uint64_t num, q;
1719
1720 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1721 den = t0;
1722 if (den == 0) {
1723 raise_exception(EXCP00_DIVZ);
1724 }
1725 q = (num / den);
1726 r = (num % den);
1727 if (q > 0xffffffff)
1728 raise_exception(EXCP00_DIVZ);
1729 EAX = (uint32_t)q;
1730 EDX = (uint32_t)r;
1731 }
1732
1733 void helper_idivl_EAX(target_ulong t0)
1734 {
1735 int den, r;
1736 int64_t num, q;
1737
1738 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1739 den = t0;
1740 if (den == 0) {
1741 raise_exception(EXCP00_DIVZ);
1742 }
1743 q = (num / den);
1744 r = (num % den);
1745 if (q != (int32_t)q)
1746 raise_exception(EXCP00_DIVZ);
1747 EAX = (uint32_t)q;
1748 EDX = (uint32_t)r;
1749 }
1750
1751 /* bcd */
1752
1753 /* XXX: exception */
1754 void helper_aam(int base)
1755 {
1756 int al, ah;
1757 al = EAX & 0xff;
1758 ah = al / base;
1759 al = al % base;
1760 EAX = (EAX & ~0xffff) | al | (ah << 8);
1761 CC_DST = al;
1762 }
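/* AAM splits AL into base-N digits: AH = AL / base, AL = AL % base, with
 * base 10 for the plain opcode.  The XXX note above presumably refers to
 * the missing divide-error check: real hardware raises #DE when the
 * immediate base is 0, which this helper does not test for.
 */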
1763
1764 void helper_aad(int base)
1765 {
1766 int al, ah;
1767 al = EAX & 0xff;
1768 ah = (EAX >> 8) & 0xff;
1769 al = ((ah * base) + al) & 0xff;
1770 EAX = (EAX & ~0xffff) | al;
1771 CC_DST = al;
1772 }
1773
1774 void helper_aaa(void)
1775 {
1776 int icarry;
1777 int al, ah, af;
1778 int eflags;
1779
1780 eflags = helper_cc_compute_all(CC_OP);
1781 af = eflags & CC_A;
1782 al = EAX & 0xff;
1783 ah = (EAX >> 8) & 0xff;
1784
1785 icarry = (al > 0xf9);
1786 if (((al & 0x0f) > 9 ) || af) {
1787 al = (al + 6) & 0x0f;
1788 ah = (ah + 1 + icarry) & 0xff;
1789 eflags |= CC_C | CC_A;
1790 } else {
1791 eflags &= ~(CC_C | CC_A);
1792 al &= 0x0f;
1793 }
1794 EAX = (EAX & ~0xffff) | al | (ah << 8);
1795 CC_SRC = eflags;
1796 }
1797
1798 void helper_aas(void)
1799 {
1800 int icarry;
1801 int al, ah, af;
1802 int eflags;
1803
1804 eflags = helper_cc_compute_all(CC_OP);
1805 af = eflags & CC_A;
1806 al = EAX & 0xff;
1807 ah = (EAX >> 8) & 0xff;
1808
1809 icarry = (al < 6);
1810 if (((al & 0x0f) > 9 ) || af) {
1811 al = (al - 6) & 0x0f;
1812 ah = (ah - 1 - icarry) & 0xff;
1813 eflags |= CC_C | CC_A;
1814 } else {
1815 eflags &= ~(CC_C | CC_A);
1816 al &= 0x0f;
1817 }
1818 EAX = (EAX & ~0xffff) | al | (ah << 8);
1819 CC_SRC = eflags;
1820 }
1821
1822 void helper_daa(void)
1823 {
1824 int al, af, cf;
1825 int eflags;
1826
1827 eflags = helper_cc_compute_all(CC_OP);
1828 cf = eflags & CC_C;
1829 af = eflags & CC_A;
1830 al = EAX & 0xff;
1831
1832 eflags = 0;
1833 if (((al & 0x0f) > 9 ) || af) {
1834 al = (al + 6) & 0xff;
1835 eflags |= CC_A;
1836 }
1837 if ((al > 0x9f) || cf) {
1838 al = (al + 0x60) & 0xff;
1839 eflags |= CC_C;
1840 }
1841 EAX = (EAX & ~0xff) | al;
1842 /* well, speed is not an issue here, so we compute the flags by hand */
1843 eflags |= (al == 0) << 6; /* zf */
1844 eflags |= parity_table[al]; /* pf */
1845 eflags |= (al & 0x80); /* sf */
1846 CC_SRC = eflags;
1847 }
1848
1849 void helper_das(void)
1850 {
1851 int al, al1, af, cf;
1852 int eflags;
1853
1854 eflags = helper_cc_compute_all(CC_OP);
1855 cf = eflags & CC_C;
1856 af = eflags & CC_A;
1857 al = EAX & 0xff;
1858
1859 eflags = 0;
1860 al1 = al;
1861 if (((al & 0x0f) > 9 ) || af) {
1862 eflags |= CC_A;
1863 if (al < 6 || cf)
1864 eflags |= CC_C;
1865 al = (al - 6) & 0xff;
1866 }
1867 if ((al1 > 0x99) || cf) {
1868 al = (al - 0x60) & 0xff;
1869 eflags |= CC_C;
1870 }
1871 EAX = (EAX & ~0xff) | al;
1872 /* well, speed is not an issue here, so we compute the flags by hand */
1873 eflags |= (al == 0) << 6; /* zf */
1874 eflags |= parity_table[al]; /* pf */
1875 eflags |= (al & 0x80); /* sf */
1876 CC_SRC = eflags;
1877 }
1878
1879 void helper_into(int next_eip_addend)
1880 {
1881 int eflags;
1882 eflags = helper_cc_compute_all(CC_OP);
1883 if (eflags & CC_O) {
1884 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1885 }
1886 }
1887
1888 void helper_cmpxchg8b(target_ulong a0)
1889 {
1890 uint64_t d;
1891 int eflags;
1892
1893 eflags = helper_cc_compute_all(CC_OP);
1894 d = ldq(a0);
1895 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1896 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1897 eflags |= CC_Z;
1898 } else {
1899 /* always do the store */
1900 stq(a0, d);
1901 EDX = (uint32_t)(d >> 32);
1902 EAX = (uint32_t)d;
1903 eflags &= ~CC_Z;
1904 }
1905 CC_SRC = eflags;
1906 }
1907
1908 #ifdef TARGET_X86_64
1909 void helper_cmpxchg16b(target_ulong a0)
1910 {
1911 uint64_t d0, d1;
1912 int eflags;
1913
1914 if ((a0 & 0xf) != 0)
1915 raise_exception(EXCP0D_GPF);
1916 eflags = helper_cc_compute_all(CC_OP);
1917 d0 = ldq(a0);
1918 d1 = ldq(a0 + 8);
1919 if (d0 == EAX && d1 == EDX) {
1920 stq(a0, EBX);
1921 stq(a0 + 8, ECX);
1922 eflags |= CC_Z;
1923 } else {
1924 /* always do the store */
1925 stq(a0, d0);
1926 stq(a0 + 8, d1);
1927 EDX = d1;
1928 EAX = d0;
1929 eflags &= ~CC_Z;
1930 }
1931 CC_SRC = eflags;
1932 }
1933 #endif
1934
1935 void helper_single_step(void)
1936 {
1937 #ifndef CONFIG_USER_ONLY
1938 check_hw_breakpoints(env, 1);
1939 env->dr[6] |= DR6_BS;
1940 #endif
1941 raise_exception(EXCP01_DB);
1942 }
1943
1944 void helper_cpuid(void)
1945 {
1946 uint32_t eax, ebx, ecx, edx;
1947
1948 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1949
1950 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
1951 EAX = eax;
1952 EBX = ebx;
1953 ECX = ecx;
1954 EDX = edx;
1955 }
1956
1957 void helper_enter_level(int level, int data32, target_ulong t1)
1958 {
1959 target_ulong ssp;
1960 uint32_t esp_mask, esp, ebp;
1961
1962 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1963 ssp = env->segs[R_SS].base;
1964 ebp = EBP;
1965 esp = ESP;
1966 if (data32) {
1967 /* 32 bit */
1968 esp -= 4;
1969 while (--level) {
1970 esp -= 4;
1971 ebp -= 4;
1972 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1973 }
1974 esp -= 4;
1975 stl(ssp + (esp & esp_mask), t1);
1976 } else {
1977 /* 16 bit */
1978 esp -= 2;
1979 while (--level) {
1980 esp -= 2;
1981 ebp -= 2;
1982 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1983 }
1984 esp -= 2;
1985 stw(ssp + (esp & esp_mask), t1);
1986 }
1987 }
1988
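/* Note on helper_enter_level(): every stack write above is masked with
 * esp_mask, so a 16-bit stack (esp_mask == 0xffff) wraps within its 64K
 * segment; for a nesting level > 1 the loop copies level-1 frame pointers
 * from the old frame before the caller-supplied t1 is pushed last. */
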
1989 #ifdef TARGET_X86_64
1990 void helper_enter64_level(int level, int data64, target_ulong t1)
1991 {
1992 target_ulong esp, ebp;
1993 ebp = EBP;
1994 esp = ESP;
1995
1996 if (data64) {
1997 /* 64 bit */
1998 esp -= 8;
1999 while (--level) {
2000 esp -= 8;
2001 ebp -= 8;
2002 stq(esp, ldq(ebp));
2003 }
2004 esp -= 8;
2005 stq(esp, t1);
2006 } else {
2007 /* 16 bit */
2008 esp -= 2;
2009 while (--level) {
2010 esp -= 2;
2011 ebp -= 2;
2012 stw(esp, lduw(ebp));
2013 }
2014 esp -= 2;
2015 stw(esp, t1);
2016 }
2017 }
2018 #endif
2019
2020 void helper_lldt(int selector)
2021 {
2022 SegmentCache *dt;
2023 uint32_t e1, e2;
2024 int index, entry_limit;
2025 target_ulong ptr;
2026
2027 selector &= 0xffff;
2028 if ((selector & 0xfffc) == 0) {
2029 /* XXX: NULL selector case: invalid LDT */
2030 env->ldt.base = 0;
2031 env->ldt.limit = 0;
2032 } else {
2033 if (selector & 0x4)
2034 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2035 dt = &env->gdt;
2036 index = selector & ~7;
2037 #ifdef TARGET_X86_64
2038 if (env->hflags & HF_LMA_MASK)
2039 entry_limit = 15;
2040 else
2041 #endif
2042 entry_limit = 7;
2043 if ((index + entry_limit) > dt->limit)
2044 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2045 ptr = dt->base + index;
2046 e1 = ldl_kernel(ptr);
2047 e2 = ldl_kernel(ptr + 4);
2048 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2049 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2050 if (!(e2 & DESC_P_MASK))
2051 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2052 #ifdef TARGET_X86_64
2053 if (env->hflags & HF_LMA_MASK) {
2054 uint32_t e3;
2055 e3 = ldl_kernel(ptr + 8);
2056 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2057 env->ldt.base |= (target_ulong)e3 << 32;
2058 } else
2059 #endif
2060 {
2061 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2062 }
2063 }
2064 env->ldt.selector = selector;
2065 }
2066
2067 void helper_ltr(int selector)
2068 {
2069 SegmentCache *dt;
2070 uint32_t e1, e2;
2071 int index, type, entry_limit;
2072 target_ulong ptr;
2073
2074 selector &= 0xffff;
2075 if ((selector & 0xfffc) == 0) {
2076 /* NULL selector case: invalid TR */
2077 env->tr.base = 0;
2078 env->tr.limit = 0;
2079 env->tr.flags = 0;
2080 } else {
2081 if (selector & 0x4)
2082 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2083 dt = &env->gdt;
2084 index = selector & ~7;
2085 #ifdef TARGET_X86_64
2086 if (env->hflags & HF_LMA_MASK)
2087 entry_limit = 15;
2088 else
2089 #endif
2090 entry_limit = 7;
2091 if ((index + entry_limit) > dt->limit)
2092 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2093 ptr = dt->base + index;
2094 e1 = ldl_kernel(ptr);
2095 e2 = ldl_kernel(ptr + 4);
2096 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2097 if ((e2 & DESC_S_MASK) ||
2098 (type != 1 && type != 9))
2099 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2100 if (!(e2 & DESC_P_MASK))
2101 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2102 #ifdef TARGET_X86_64
2103 if (env->hflags & HF_LMA_MASK) {
2104 uint32_t e3, e4;
2105 e3 = ldl_kernel(ptr + 8);
2106 e4 = ldl_kernel(ptr + 12);
2107 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2108 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2109 load_seg_cache_raw_dt(&env->tr, e1, e2);
2110 env->tr.base |= (target_ulong)e3 << 32;
2111 } else
2112 #endif
2113 {
2114 load_seg_cache_raw_dt(&env->tr, e1, e2);
2115 }
2116 e2 |= DESC_TSS_BUSY_MASK;
2117 stl_kernel(ptr + 4, e2);
2118 }
2119 env->tr.selector = selector;
2120 }
2121
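/* For reference, the two descriptor words fetched by the helpers above and
 * below (e1 = bytes 0-3, e2 = bytes 4-7 of a GDT/LDT entry) follow the usual
 * x86 layout:
 *   e1: limit[15:0] | base[15:0] << 16
 *   e2: base[23:16] | type (bits 8-11) | S | DPL | P
 *       | limit[19:16] (bits 16-19) | AVL/L/D-B/G | base[31:24]
 * which is why 64-bit system descriptors need the extra e3/e4 words for
 * base[63:32] (entry_limit 15 instead of 7 in long mode). */
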
2122 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2123 void helper_load_seg(int seg_reg, int selector)
2124 {
2125 uint32_t e1, e2;
2126 int cpl, dpl, rpl;
2127 SegmentCache *dt;
2128 int index;
2129 target_ulong ptr;
2130
2131 selector &= 0xffff;
2132 cpl = env->hflags & HF_CPL_MASK;
2133 if ((selector & 0xfffc) == 0) {
2134 /* null selector case */
2135 if (seg_reg == R_SS
2136 #ifdef TARGET_X86_64
2137 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2138 #endif
2139 )
2140 raise_exception_err(EXCP0D_GPF, 0);
2141 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2142 } else {
2143
2144 if (selector & 0x4)
2145 dt = &env->ldt;
2146 else
2147 dt = &env->gdt;
2148 index = selector & ~7;
2149 if ((index + 7) > dt->limit)
2150 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2151 ptr = dt->base + index;
2152 e1 = ldl_kernel(ptr);
2153 e2 = ldl_kernel(ptr + 4);
2154
2155 if (!(e2 & DESC_S_MASK))
2156 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2157 rpl = selector & 3;
2158 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2159 if (seg_reg == R_SS) {
2160 /* must be writable segment */
2161 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2162 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2163 if (rpl != cpl || dpl != cpl)
2164 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2165 } else {
2166 /* must be readable segment */
2167 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2168 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2169
2170 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2171 /* if not conforming code, test rights */
2172 if (dpl < cpl || dpl < rpl)
2173 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2174 }
2175 }
2176
2177 if (!(e2 & DESC_P_MASK)) {
2178 if (seg_reg == R_SS)
2179 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2180 else
2181 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2182 }
2183
2184 /* set the access bit if not already set */
2185 if (!(e2 & DESC_A_MASK)) {
2186 e2 |= DESC_A_MASK;
2187 stl_kernel(ptr + 4, e2);
2188 }
2189
2190 cpu_x86_load_seg_cache(env, seg_reg, selector,
2191 get_seg_base(e1, e2),
2192 get_seg_limit(e1, e2),
2193 e2);
2194 #if 0
2195 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2196 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2197 #endif
2198 }
2199 }
2200
2201 /* protected mode jump */
2202 void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2203 int next_eip_addend)
2204 {
2205 int gate_cs, type;
2206 uint32_t e1, e2, cpl, dpl, rpl, limit;
2207 target_ulong next_eip;
2208
2209 if ((new_cs & 0xfffc) == 0)
2210 raise_exception_err(EXCP0D_GPF, 0);
2211 if (load_segment(&e1, &e2, new_cs) != 0)
2212 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2213 cpl = env->hflags & HF_CPL_MASK;
2214 if (e2 & DESC_S_MASK) {
2215 if (!(e2 & DESC_CS_MASK))
2216 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2217 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2218 if (e2 & DESC_C_MASK) {
2219 /* conforming code segment */
2220 if (dpl > cpl)
2221 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2222 } else {
2223 /* non conforming code segment */
2224 rpl = new_cs & 3;
2225 if (rpl > cpl)
2226 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2227 if (dpl != cpl)
2228 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2229 }
2230 if (!(e2 & DESC_P_MASK))
2231 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2232 limit = get_seg_limit(e1, e2);
2233 if (new_eip > limit &&
2234 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2235 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2236 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2237 get_seg_base(e1, e2), limit, e2);
2238 EIP = new_eip;
2239 } else {
2240 /* jump to call or task gate */
2241 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2242 rpl = new_cs & 3;
2243 cpl = env->hflags & HF_CPL_MASK;
2244 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2245 switch(type) {
2246 case 1: /* 286 TSS */
2247 case 9: /* 386 TSS */
2248 case 5: /* task gate */
2249 if (dpl < cpl || dpl < rpl)
2250 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2251 next_eip = env->eip + next_eip_addend;
2252 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2253 CC_OP = CC_OP_EFLAGS;
2254 break;
2255 case 4: /* 286 call gate */
2256 case 12: /* 386 call gate */
2257 if ((dpl < cpl) || (dpl < rpl))
2258 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2259 if (!(e2 & DESC_P_MASK))
2260 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2261 gate_cs = e1 >> 16;
2262 new_eip = (e1 & 0xffff);
2263 if (type == 12)
2264 new_eip |= (e2 & 0xffff0000);
2265 if (load_segment(&e1, &e2, gate_cs) != 0)
2266 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2267 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2268 /* must be code segment */
2269 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2270 (DESC_S_MASK | DESC_CS_MASK)))
2271 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2272 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2273 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2274 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2275 if (!(e2 & DESC_P_MASK))
2276 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2277 limit = get_seg_limit(e1, e2);
2278 if (new_eip > limit)
2279 raise_exception_err(EXCP0D_GPF, 0);
2280 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2281 get_seg_base(e1, e2), limit, e2);
2282 EIP = new_eip;
2283 break;
2284 default:
2285 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2286 break;
2287 }
2288 }
2289 }
2290
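/* Call-gate descriptor fields as decoded in helper_ljmp_protected() above:
 * e1[31:16] holds the target code-segment selector, e1[15:0] the low 16
 * offset bits, and for a 386 gate (type 12) e2[31:16] supplies the high
 * offset bits; 286 gates (type 4) keep a 16-bit offset only. */
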
2291 /* real mode call */
2292 void helper_lcall_real(int new_cs, target_ulong new_eip1,
2293 int shift, int next_eip)
2294 {
2295 int new_eip;
2296 uint32_t esp, esp_mask;
2297 target_ulong ssp;
2298
2299 new_eip = new_eip1;
2300 esp = ESP;
2301 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2302 ssp = env->segs[R_SS].base;
2303 if (shift) {
2304 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2305 PUSHL(ssp, esp, esp_mask, next_eip);
2306 } else {
2307 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2308 PUSHW(ssp, esp, esp_mask, next_eip);
2309 }
2310
2311 SET_ESP(esp, esp_mask);
2312 env->eip = new_eip;
2313 env->segs[R_CS].selector = new_cs;
2314 env->segs[R_CS].base = (new_cs << 4);
2315 }
2316
2317 /* protected mode call */
2318 void helper_lcall_protected(int new_cs, target_ulong new_eip,
2319 int shift, int next_eip_addend)
2320 {
2321 int new_stack, i;
2322 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2323 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2324 uint32_t val, limit, old_sp_mask;
2325 target_ulong ssp, old_ssp, next_eip;
2326
2327 next_eip = env->eip + next_eip_addend;
2328 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2329 LOG_PCALL_STATE(env);
2330 if ((new_cs & 0xfffc) == 0)
2331 raise_exception_err(EXCP0D_GPF, 0);
2332 if (load_segment(&e1, &e2, new_cs) != 0)
2333 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2334 cpl = env->hflags & HF_CPL_MASK;
2335 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2336 if (e2 & DESC_S_MASK) {
2337 if (!(e2 & DESC_CS_MASK))
2338 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2339 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2340 if (e2 & DESC_C_MASK) {
2341 /* conforming code segment */
2342 if (dpl > cpl)
2343 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2344 } else {
2345 /* non conforming code segment */
2346 rpl = new_cs & 3;
2347 if (rpl > cpl)
2348 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2349 if (dpl != cpl)
2350 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2351 }
2352 if (!(e2 & DESC_P_MASK))
2353 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2354
2355 #ifdef TARGET_X86_64
2356 /* XXX: check 16/32 bit cases in long mode */
2357 if (shift == 2) {
2358 target_ulong rsp;
2359 /* 64 bit case */
2360 rsp = ESP;
2361 PUSHQ(rsp, env->segs[R_CS].selector);
2362 PUSHQ(rsp, next_eip);
2363 /* from this point, not restartable */
2364 ESP = rsp;
2365 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2366 get_seg_base(e1, e2),
2367 get_seg_limit(e1, e2), e2);
2368 EIP = new_eip;
2369 } else
2370 #endif
2371 {
2372 sp = ESP;
2373 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2374 ssp = env->segs[R_SS].base;
2375 if (shift) {
2376 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2377 PUSHL(ssp, sp, sp_mask, next_eip);
2378 } else {
2379 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2380 PUSHW(ssp, sp, sp_mask, next_eip);
2381 }
2382
2383 limit = get_seg_limit(e1, e2);
2384 if (new_eip > limit)
2385 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2386 /* from this point, not restartable */
2387 SET_ESP(sp, sp_mask);
2388 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2389 get_seg_base(e1, e2), limit, e2);
2390 EIP = new_eip;
2391 }
2392 } else {
2393 /* check gate type */
2394 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2395 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2396 rpl = new_cs & 3;
2397 switch(type) {
2398 case 1: /* available 286 TSS */
2399 case 9: /* available 386 TSS */
2400 case 5: /* task gate */
2401 if (dpl < cpl || dpl < rpl)
2402 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2403 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2404 CC_OP = CC_OP_EFLAGS;
2405 return;
2406 case 4: /* 286 call gate */
2407 case 12: /* 386 call gate */
2408 break;
2409 default:
2410 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2411 break;
2412 }
2413 shift = type >> 3;
2414
2415 if (dpl < cpl || dpl < rpl)
2416 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2417 /* check valid bit */
2418 if (!(e2 & DESC_P_MASK))
2419 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2420 selector = e1 >> 16;
2421 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2422 param_count = e2 & 0x1f;
2423 if ((selector & 0xfffc) == 0)
2424 raise_exception_err(EXCP0D_GPF, 0);
2425
2426 if (load_segment(&e1, &e2, selector) != 0)
2427 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2428 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2429 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2430 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2431 if (dpl > cpl)
2432 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2433 if (!(e2 & DESC_P_MASK))
2434 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2435
2436 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2437 /* to inner privilege */
2438 get_ss_esp_from_tss(&ss, &sp, dpl);
2439 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2440 ss, sp, param_count, ESP);
2441 if ((ss & 0xfffc) == 0)
2442 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2443 if ((ss & 3) != dpl)
2444 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2445 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2446 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2447 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2448 if (ss_dpl != dpl)
2449 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2450 if (!(ss_e2 & DESC_S_MASK) ||
2451 (ss_e2 & DESC_CS_MASK) ||
2452 !(ss_e2 & DESC_W_MASK))
2453 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2454 if (!(ss_e2 & DESC_P_MASK))
2455 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2456
2457 // push_size = ((param_count * 2) + 8) << shift;
2458
2459 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2460 old_ssp = env->segs[R_SS].base;
2461
2462 sp_mask = get_sp_mask(ss_e2);
2463 ssp = get_seg_base(ss_e1, ss_e2);
2464 if (shift) {
2465 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2466 PUSHL(ssp, sp, sp_mask, ESP);
2467 for(i = param_count - 1; i >= 0; i--) {
2468 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2469 PUSHL(ssp, sp, sp_mask, val);
2470 }
2471 } else {
2472 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2473 PUSHW(ssp, sp, sp_mask, ESP);
2474 for(i = param_count - 1; i >= 0; i--) {
2475 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2476 PUSHW(ssp, sp, sp_mask, val);
2477 }
2478 }
2479 new_stack = 1;
2480 } else {
2481 /* to same privilege */
2482 sp = ESP;
2483 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2484 ssp = env->segs[R_SS].base;
2485 // push_size = (4 << shift);
2486 new_stack = 0;
2487 }
2488
2489 if (shift) {
2490 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2491 PUSHL(ssp, sp, sp_mask, next_eip);
2492 } else {
2493 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2494 PUSHW(ssp, sp, sp_mask, next_eip);
2495 }
2496
2497 /* from this point, not restartable */
2498
2499 if (new_stack) {
2500 ss = (ss & ~3) | dpl;
2501 cpu_x86_load_seg_cache(env, R_SS, ss,
2502 ssp,
2503 get_seg_limit(ss_e1, ss_e2),
2504 ss_e2);
2505 }
2506
2507 selector = (selector & ~3) | dpl;
2508 cpu_x86_load_seg_cache(env, R_CS, selector,
2509 get_seg_base(e1, e2),
2510 get_seg_limit(e1, e2),
2511 e2);
2512 cpu_x86_set_cpl(env, dpl);
2513 SET_ESP(sp, sp_mask);
2514 EIP = offset;
2515 }
2516 }
2517
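/* In helper_lcall_protected() the push width through a call gate follows
 * from the gate type: shift = type >> 3 gives 0 for a 286 gate (type 4,
 * 16-bit pushes) and 1 for a 386 gate (type 12, 32-bit pushes), and
 * param_count (e2 & 0x1f) words/dwords are copied from the old stack when
 * switching to an inner privilege level. */
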
2518 /* real and vm86 mode iret */
2519 void helper_iret_real(int shift)
2520 {
2521 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2522 target_ulong ssp;
2523 int eflags_mask;
2524
2525 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2526 sp = ESP;
2527 ssp = env->segs[R_SS].base;
2528 if (shift == 1) {
2529 /* 32 bits */
2530 POPL(ssp, sp, sp_mask, new_eip);
2531 POPL(ssp, sp, sp_mask, new_cs);
2532 new_cs &= 0xffff;
2533 POPL(ssp, sp, sp_mask, new_eflags);
2534 } else {
2535 /* 16 bits */
2536 POPW(ssp, sp, sp_mask, new_eip);
2537 POPW(ssp, sp, sp_mask, new_cs);
2538 POPW(ssp, sp, sp_mask, new_eflags);
2539 }
2540 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2541 env->segs[R_CS].selector = new_cs;
2542 env->segs[R_CS].base = (new_cs << 4);
2543 env->eip = new_eip;
2544 if (env->eflags & VM_MASK)
2545 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2546 else
2547 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2548 if (shift == 0)
2549 eflags_mask &= 0xffff;
2550 load_eflags(new_eflags, eflags_mask);
2551 env->hflags2 &= ~HF2_NMI_MASK;
2552 }
2553
2554 static inline void validate_seg(int seg_reg, int cpl)
2555 {
2556 int dpl;
2557 uint32_t e2;
2558
2559 /* XXX: on x86_64, we do not want to nullify FS and GS because
2560 they may still contain a valid base. I would be interested to
2561 know how a real x86_64 CPU behaves */
2562 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2563 (env->segs[seg_reg].selector & 0xfffc) == 0)
2564 return;
2565
2566 e2 = env->segs[seg_reg].flags;
2567 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2568 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2569 /* data or non conforming code segment */
2570 if (dpl < cpl) {
2571 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2572 }
2573 }
2574 }
2575
2576 /* protected mode iret */
2577 static inline void helper_ret_protected(int shift, int is_iret, int addend)
2578 {
2579 uint32_t new_cs, new_eflags, new_ss;
2580 uint32_t new_es, new_ds, new_fs, new_gs;
2581 uint32_t e1, e2, ss_e1, ss_e2;
2582 int cpl, dpl, rpl, eflags_mask, iopl;
2583 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2584
2585 #ifdef TARGET_X86_64
2586 if (shift == 2)
2587 sp_mask = -1;
2588 else
2589 #endif
2590 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2591 sp = ESP;
2592 ssp = env->segs[R_SS].base;
2593 new_eflags = 0; /* avoid warning */
2594 #ifdef TARGET_X86_64
2595 if (shift == 2) {
2596 POPQ(sp, new_eip);
2597 POPQ(sp, new_cs);
2598 new_cs &= 0xffff;
2599 if (is_iret) {
2600 POPQ(sp, new_eflags);
2601 }
2602 } else
2603 #endif
2604 if (shift == 1) {
2605 /* 32 bits */
2606 POPL(ssp, sp, sp_mask, new_eip);
2607 POPL(ssp, sp, sp_mask, new_cs);
2608 new_cs &= 0xffff;
2609 if (is_iret) {
2610 POPL(ssp, sp, sp_mask, new_eflags);
2611 if (new_eflags & VM_MASK)
2612 goto return_to_vm86;
2613 }
2614 } else {
2615 /* 16 bits */
2616 POPW(ssp, sp, sp_mask, new_eip);
2617 POPW(ssp, sp, sp_mask, new_cs);
2618 if (is_iret)
2619 POPW(ssp, sp, sp_mask, new_eflags);
2620 }
2621 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2622 new_cs, new_eip, shift, addend);
2623 LOG_PCALL_STATE(env);
2624 if ((new_cs & 0xfffc) == 0)
2625 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2626 if (load_segment(&e1, &e2, new_cs) != 0)
2627 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2628 if (!(e2 & DESC_S_MASK) ||
2629 !(e2 & DESC_CS_MASK))
2630 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2631 cpl = env->hflags & HF_CPL_MASK;
2632 rpl = new_cs & 3;
2633 if (rpl < cpl)
2634 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2635 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2636 if (e2 & DESC_C_MASK) {
2637 if (dpl > rpl)
2638 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2639 } else {
2640 if (dpl != rpl)
2641 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2642 }
2643 if (!(e2 & DESC_P_MASK))
2644 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2645
2646 sp += addend;
2647 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2648 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2649 /* return to same privilege level */
2650 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2651 get_seg_base(e1, e2),
2652 get_seg_limit(e1, e2),
2653 e2);
2654 } else {
2655 /* return to different privilege level */
2656 #ifdef TARGET_X86_64
2657 if (shift == 2) {
2658 POPQ(sp, new_esp);
2659 POPQ(sp, new_ss);
2660 new_ss &= 0xffff;
2661 } else
2662 #endif
2663 if (shift == 1) {
2664 /* 32 bits */
2665 POPL(ssp, sp, sp_mask, new_esp);
2666 POPL(ssp, sp, sp_mask, new_ss);
2667 new_ss &= 0xffff;
2668 } else {
2669 /* 16 bits */
2670 POPW(ssp, sp, sp_mask, new_esp);
2671 POPW(ssp, sp, sp_mask, new_ss);
2672 }
2673 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2674 new_ss, new_esp);
2675 if ((new_ss & 0xfffc) == 0) {
2676 #ifdef TARGET_X86_64
2677 /* NULL ss is allowed in long mode if cpl != 3 */
2678 /* XXX: test CS64 ? */
2679 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2680 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2681 0, 0xffffffff,
2682 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2683 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2684 DESC_W_MASK | DESC_A_MASK);
2685 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2686 } else
2687 #endif
2688 {
2689 raise_exception_err(EXCP0D_GPF, 0);
2690 }
2691 } else {
2692 if ((new_ss & 3) != rpl)
2693 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2694 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2695 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2696 if (!(ss_e2 & DESC_S_MASK) ||
2697 (ss_e2 & DESC_CS_MASK) ||
2698 !(ss_e2 & DESC_W_MASK))
2699 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2700 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2701 if (dpl != rpl)
2702 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2703 if (!(ss_e2 & DESC_P_MASK))
2704 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2705 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2706 get_seg_base(ss_e1, ss_e2),
2707 get_seg_limit(ss_e1, ss_e2),
2708 ss_e2);
2709 }
2710
2711 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2712 get_seg_base(e1, e2),
2713 get_seg_limit(e1, e2),
2714 e2);
2715 cpu_x86_set_cpl(env, rpl);
2716 sp = new_esp;
2717 #ifdef TARGET_X86_64
2718 if (env->hflags & HF_CS64_MASK)
2719 sp_mask = -1;
2720 else
2721 #endif
2722 sp_mask = get_sp_mask(ss_e2);
2723
2724 /* validate data segments */
2725 validate_seg(R_ES, rpl);
2726 validate_seg(R_DS, rpl);
2727 validate_seg(R_FS, rpl);
2728 validate_seg(R_GS, rpl);
2729
2730 sp += addend;
2731 }
2732 SET_ESP(sp, sp_mask);
2733 env->eip = new_eip;
2734 if (is_iret) {
2735 /* NOTE: 'cpl' is the _old_ CPL */
2736 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2737 if (cpl == 0)
2738 eflags_mask |= IOPL_MASK;
2739 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2740 if (cpl <= iopl)
2741 eflags_mask |= IF_MASK;
2742 if (shift == 0)
2743 eflags_mask &= 0xffff;
2744 load_eflags(new_eflags, eflags_mask);
2745 }
2746 return;
2747
2748 return_to_vm86:
2749 POPL(ssp, sp, sp_mask, new_esp);
2750 POPL(ssp, sp, sp_mask, new_ss);
2751 POPL(ssp, sp, sp_mask, new_es);
2752 POPL(ssp, sp, sp_mask, new_ds);
2753 POPL(ssp, sp, sp_mask, new_fs);
2754 POPL(ssp, sp, sp_mask, new_gs);
2755
2756 /* modify processor state */
2757 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2758 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2759 load_seg_vm(R_CS, new_cs & 0xffff);
2760 cpu_x86_set_cpl(env, 3);
2761 load_seg_vm(R_SS, new_ss & 0xffff);
2762 load_seg_vm(R_ES, new_es & 0xffff);
2763 load_seg_vm(R_DS, new_ds & 0xffff);
2764 load_seg_vm(R_FS, new_fs & 0xffff);
2765 load_seg_vm(R_GS, new_gs & 0xffff);
2766
2767 env->eip = new_eip & 0xffff;
2768 ESP = new_esp;
2769 }
2770
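/* Stack image consumed by helper_ret_protected() above, from the current
 * stack pointer upwards: EIP, CS[, EFLAGS when is_iret], then addend bytes
 * of parameters, and, only when returning to an outer privilege level, the
 * new ESP and SS. Slot widths are 2, 4 or 8 bytes for shift = 0, 1 or 2. */
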
2771 void helper_iret_protected(int shift, int next_eip)
2772 {
2773 int tss_selector, type;
2774 uint32_t e1, e2;
2775
2776 /* specific case for TSS */
2777 if (env->eflags & NT_MASK) {
2778 #ifdef TARGET_X86_64
2779 if (env->hflags & HF_LMA_MASK)
2780 raise_exception_err(EXCP0D_GPF, 0);
2781 #endif
2782 tss_selector = lduw_kernel(env->tr.base + 0);
2783 if (tss_selector & 4)
2784 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2785 if (load_segment(&e1, &e2, tss_selector) != 0)
2786 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2787 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2788 /* NOTE: we check both segment and busy TSS */
2789 if (type != 3)
2790 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2791 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2792 } else {
2793 helper_ret_protected(shift, 1, 0);
2794 }
2795 env->hflags2 &= ~HF2_NMI_MASK;
2796 }
2797
2798 void helper_lret_protected(int shift, int addend)
2799 {
2800 helper_ret_protected(shift, 0, addend);
2801 }
2802
2803 void helper_sysenter(void)
2804 {
2805 if (env->sysenter_cs == 0) {
2806 raise_exception_err(EXCP0D_GPF, 0);
2807 }
2808 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2809 cpu_x86_set_cpl(env, 0);
2810
2811 #ifdef TARGET_X86_64
2812 if (env->hflags & HF_LMA_MASK) {
2813 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2814 0, 0xffffffff,
2815 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2816 DESC_S_MASK |
2817 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2818 } else
2819 #endif
2820 {
2821 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2822 0, 0xffffffff,
2823 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2824 DESC_S_MASK |
2825 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2826 }
2827 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2828 0, 0xffffffff,
2829 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2830 DESC_S_MASK |
2831 DESC_W_MASK | DESC_A_MASK);
2832 ESP = env->sysenter_esp;
2833 EIP = env->sysenter_eip;
2834 }
2835
2836 void helper_sysexit(int dflag)
2837 {
2838 int cpl;
2839
2840 cpl = env->hflags & HF_CPL_MASK;
2841 if (env->sysenter_cs == 0 || cpl != 0) {
2842 raise_exception_err(EXCP0D_GPF, 0);
2843 }
2844 cpu_x86_set_cpl(env, 3);
2845 #ifdef TARGET_X86_64
2846 if (dflag == 2) {
2847 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2848 0, 0xffffffff,
2849 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2850 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2851 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2852 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2853 0, 0xffffffff,
2854 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2855 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2856 DESC_W_MASK | DESC_A_MASK);
2857 } else
2858 #endif
2859 {
2860 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2861 0, 0xffffffff,
2862 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2863 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2864 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2865 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2866 0, 0xffffffff,
2867 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2868 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2869 DESC_W_MASK | DESC_A_MASK);
2870 }
2871 ESP = ECX;
2872 EIP = EDX;
2873 }
2874
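/* Selector layout implied by the SYSENTER/SYSEXIT helpers above, relative
 * to IA32_SYSENTER_CS:
 *   SYSENTER:          CS = base,      SS = base + 8
 *   SYSEXIT (legacy):  CS = base + 16, SS = base + 24   (RPL forced to 3)
 *   SYSEXIT (64-bit):  CS = base + 32, SS = base + 40   (RPL forced to 3)
 * with flat 4G segments loaded in every case. */
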
2875 #if defined(CONFIG_USER_ONLY)
2876 target_ulong helper_read_crN(int reg)
2877 {
2878 return 0;
2879 }
2880
2881 void helper_write_crN(int reg, target_ulong t0)
2882 {
2883 }
2884
2885 void helper_movl_drN_T0(int reg, target_ulong t0)
2886 {
2887 }
2888 #else
2889 target_ulong helper_read_crN(int reg)
2890 {
2891 target_ulong val;
2892
2893 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2894 switch(reg) {
2895 default:
2896 val = env->cr[reg];
2897 break;
2898 case 8:
2899 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2900 val = cpu_get_apic_tpr(env->apic_state);
2901 } else {
2902 val = env->v_tpr;
2903 }
2904 break;
2905 }
2906 return val;
2907 }
2908
2909 void helper_write_crN(int reg, target_ulong t0)
2910 {
2911 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2912 switch(reg) {
2913 case 0:
2914 cpu_x86_update_cr0(env, t0);
2915 break;
2916 case 3:
2917 cpu_x86_update_cr3(env, t0);
2918 break;
2919 case 4:
2920 cpu_x86_update_cr4(env, t0);
2921 break;
2922 case 8:
2923 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2924 cpu_set_apic_tpr(env->apic_state, t0);
2925 }
2926 env->v_tpr = t0 & 0x0f;
2927 break;
2928 default:
2929 env->cr[reg] = t0;
2930 break;
2931 }
2932 }
2933
2934 void helper_movl_drN_T0(int reg, target_ulong t0)
2935 {
2936 int i;
2937
2938 if (reg < 4) {
2939 hw_breakpoint_remove(env, reg);
2940 env->dr[reg] = t0;
2941 hw_breakpoint_insert(env, reg);
2942 } else if (reg == 7) {
2943 for (i = 0; i < 4; i++)
2944 hw_breakpoint_remove(env, i);
2945 env->dr[7] = t0;
2946 for (i = 0; i < 4; i++)
2947 hw_breakpoint_insert(env, i);
2948 } else
2949 env->dr[reg] = t0;
2950 }
2951 #endif
2952
2953 void helper_lmsw(target_ulong t0)
2954 {
2955 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2956 if already set to one. */
2957 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2958 helper_write_crN(0, t0);
2959 }
2960
2961 void helper_clts(void)
2962 {
2963 env->cr[0] &= ~CR0_TS_MASK;
2964 env->hflags &= ~HF_TS_MASK;
2965 }
2966
2967 void helper_invlpg(target_ulong addr)
2968 {
2969 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
2970 tlb_flush_page(env, addr);
2971 }
2972
2973 void helper_rdtsc(void)
2974 {
2975 uint64_t val;
2976
2977 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2978 raise_exception(EXCP0D_GPF);
2979 }
2980 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
2981
2982 val = cpu_get_tsc(env) + env->tsc_offset;
2983 EAX = (uint32_t)(val);
2984 EDX = (uint32_t)(val >> 32);
2985 }
2986
2987 void helper_rdtscp(void)
2988 {
2989 helper_rdtsc();
2990 ECX = (uint32_t)(env->tsc_aux);
2991 }
2992
2993 void helper_rdpmc(void)
2994 {
2995 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2996 raise_exception(EXCP0D_GPF);
2997 }
2998 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
2999
3000 /* currently unimplemented */
3001 raise_exception_err(EXCP06_ILLOP, 0);
3002 }
3003
3004 #if defined(CONFIG_USER_ONLY)
3005 void helper_wrmsr(void)
3006 {
3007 }
3008
3009 void helper_rdmsr(void)
3010 {
3011 }
3012 #else
3013 void helper_wrmsr(void)
3014 {
3015 uint64_t val;
3016
3017 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3018
3019 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3020
3021 switch((uint32_t)ECX) {
3022 case MSR_IA32_SYSENTER_CS:
3023 env->sysenter_cs = val & 0xffff;
3024 break;
3025 case MSR_IA32_SYSENTER_ESP:
3026 env->sysenter_esp = val;
3027 break;
3028 case MSR_IA32_SYSENTER_EIP:
3029 env->sysenter_eip = val;
3030 break;
3031 case MSR_IA32_APICBASE:
3032 cpu_set_apic_base(env->apic_state, val);
3033 break;
3034 case MSR_EFER:
3035 {
3036 uint64_t update_mask;
3037 update_mask = 0;
3038 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3039 update_mask |= MSR_EFER_SCE;
3040 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3041 update_mask |= MSR_EFER_LME;
3042 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3043 update_mask |= MSR_EFER_FFXSR;
3044 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3045 update_mask |= MSR_EFER_NXE;
3046 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3047 update_mask |= MSR_EFER_SVME;
3048 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3049 update_mask |= MSR_EFER_FFXSR;
3050 cpu_load_efer(env, (env->efer & ~update_mask) |
3051 (val & update_mask));
3052 }
3053 break;
3054 case MSR_STAR:
3055 env->star = val;
3056 break;
3057 case MSR_PAT:
3058 env->pat = val;
3059 break;
3060 case MSR_VM_HSAVE_PA:
3061 env->vm_hsave = val;
3062 break;
3063 #ifdef TARGET_X86_64
3064 case MSR_LSTAR:
3065 env->lstar = val;
3066 break;
3067 case MSR_CSTAR:
3068 env->cstar = val;
3069 break;
3070 case MSR_FMASK:
3071 env->fmask = val;
3072 break;
3073 case MSR_FSBASE:
3074 env->segs[R_FS].base = val;
3075 break;
3076 case MSR_GSBASE:
3077 env->segs[R_GS].base = val;
3078 break;
3079 case MSR_KERNELGSBASE:
3080 env->kernelgsbase = val;
3081 break;
3082 #endif
3083 case MSR_MTRRphysBase(0):
3084 case MSR_MTRRphysBase(1):
3085 case MSR_MTRRphysBase(2):
3086 case MSR_MTRRphysBase(3):
3087 case MSR_MTRRphysBase(4):
3088 case MSR_MTRRphysBase(5):
3089 case MSR_MTRRphysBase(6):
3090 case MSR_MTRRphysBase(7):
3091 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3092 break;
3093 case MSR_MTRRphysMask(0):
3094 case MSR_MTRRphysMask(1):
3095 case MSR_MTRRphysMask(2):
3096 case MSR_MTRRphysMask(3):
3097 case MSR_MTRRphysMask(4):
3098 case MSR_MTRRphysMask(5):
3099 case MSR_MTRRphysMask(6):
3100 case MSR_MTRRphysMask(7):
3101 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3102 break;
3103 case MSR_MTRRfix64K_00000:
3104 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3105 break;
3106 case MSR_MTRRfix16K_80000:
3107 case MSR_MTRRfix16K_A0000:
3108 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3109 break;
3110 case MSR_MTRRfix4K_C0000:
3111 case MSR_MTRRfix4K_C8000:
3112 case MSR_MTRRfix4K_D0000:
3113 case MSR_MTRRfix4K_D8000:
3114 case MSR_MTRRfix4K_E0000:
3115 case MSR_MTRRfix4K_E8000:
3116 case MSR_MTRRfix4K_F0000:
3117 case MSR_MTRRfix4K_F8000:
3118 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3119 break;
3120 case MSR_MTRRdefType:
3121 env->mtrr_deftype = val;
3122 break;
3123 case MSR_MCG_STATUS:
3124 env->mcg_status = val;
3125 break;
3126 case MSR_MCG_CTL:
3127 if ((env->mcg_cap & MCG_CTL_P)
3128 && (val == 0 || val == ~(uint64_t)0))
3129 env->mcg_ctl = val;
3130 break;
3131 case MSR_TSC_AUX:
3132 env->tsc_aux = val;
3133 break;
3134 default:
3135 if ((uint32_t)ECX >= MSR_MC0_CTL
3136 && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3137 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3138 if ((offset & 0x3) != 0
3139 || (val == 0 || val == ~(uint64_t)0))
3140 env->mce_banks[offset] = val;
3141 break;
3142 }
3143 /* XXX: exception ? */
3144 break;
3145 }
3146 }
3147
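/* Both MSR helpers use the usual register convention: the MSR index is
 * taken from ECX, WRMSR assembles the 64-bit value from EDX:EAX (see the
 * start of helper_wrmsr() above) and RDMSR below returns it split back into
 * EDX:EAX. */
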
3148 void helper_rdmsr(void)
3149 {
3150 uint64_t val;
3151
3152 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3153
3154 switch((uint32_t)ECX) {
3155 case MSR_IA32_SYSENTER_CS:
3156 val = env->sysenter_cs;
3157 break;
3158 case MSR_IA32_SYSENTER_ESP:
3159 val = env->sysenter_esp;
3160 break;
3161 case MSR_IA32_SYSENTER_EIP:
3162 val = env->sysenter_eip;
3163 break;
3164 case MSR_IA32_APICBASE:
3165 val = cpu_get_apic_base(env->apic_state);
3166 break;
3167 case MSR_EFER:
3168 val = env->efer;
3169 break;
3170 case MSR_STAR:
3171 val = env->star;
3172 break;
3173 case MSR_PAT:
3174 val = env->pat;
3175 break;
3176 case MSR_VM_HSAVE_PA:
3177 val = env->vm_hsave;
3178 break;
3179 case MSR_IA32_PERF_STATUS:
3180 /* tsc_increment_by_tick */
3181 val = 1000ULL;
3182 /* CPU multiplier */
3183 val |= (((uint64_t)4ULL) << 40);
3184 break;
3185 #ifdef TARGET_X86_64
3186 case MSR_LSTAR:
3187 val = env->lstar;
3188 break;
3189 case MSR_CSTAR:
3190 val = env->cstar;
3191 break;
3192 case MSR_FMASK:
3193 val = env->fmask;
3194 break;
3195 case MSR_FSBASE:
3196 val = env->segs[R_FS].base;
3197 break;
3198 case MSR_GSBASE:
3199 val = env->segs[R_GS].base;
3200 break;
3201 case MSR_KERNELGSBASE:
3202 val = env->kernelgsbase;
3203 break;
3204 case MSR_TSC_AUX:
3205 val = env->tsc_aux;
3206 break;
3207 #endif
3208 case MSR_MTRRphysBase(0):
3209 case MSR_MTRRphysBase(1):
3210 case MSR_MTRRphysBase(2):
3211 case MSR_MTRRphysBase(3):
3212 case MSR_MTRRphysBase(4):
3213 case MSR_MTRRphysBase(5):
3214 case MSR_MTRRphysBase(6):
3215 case MSR_MTRRphysBase(7):
3216 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3217 break;
3218 case MSR_MTRRphysMask(0):
3219 case MSR_MTRRphysMask(1):
3220 case MSR_MTRRphysMask(2):
3221 case MSR_MTRRphysMask(3):
3222 case MSR_MTRRphysMask(4):
3223 case MSR_MTRRphysMask(5):
3224 case MSR_MTRRphysMask(6):
3225 case MSR_MTRRphysMask(7):
3226 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3227 break;
3228 case MSR_MTRRfix64K_00000:
3229 val = env->mtrr_fixed[0];
3230 break;
3231 case MSR_MTRRfix16K_80000:
3232 case MSR_MTRRfix16K_A0000:
3233 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3234 break;
3235 case MSR_MTRRfix4K_C0000:
3236 case MSR_MTRRfix4K_C8000:
3237 case MSR_MTRRfix4K_D0000:
3238 case MSR_MTRRfix4K_D8000:
3239 case MSR_MTRRfix4K_E0000:
3240 case MSR_MTRRfix4K_E8000:
3241 case MSR_MTRRfix4K_F0000:
3242 case MSR_MTRRfix4K_F8000:
3243 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3244 break;
3245 case MSR_MTRRdefType:
3246 val = env->mtrr_deftype;
3247 break;
3248 case MSR_MTRRcap:
3249 if (env->cpuid_features & CPUID_MTRR)
3250 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3251 else
3252 /* XXX: exception ? */
3253 val = 0;
3254 break;
3255 case MSR_MCG_CAP:
3256 val = env->mcg_cap;
3257 break;
3258 case MSR_MCG_CTL:
3259 if (env->mcg_cap & MCG_CTL_P)
3260 val = env->mcg_ctl;
3261 else
3262 val = 0;
3263 break;
3264 case MSR_MCG_STATUS:
3265 val = env->mcg_status;
3266 break;
3267 default:
3268 if ((uint32_t)ECX >= MSR_MC0_CTL
3269 && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
3270 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3271 val = env->mce_banks[offset];
3272 break;
3273 }
3274 /* XXX: exception ? */
3275 val = 0;
3276 break;
3277 }
3278 EAX = (uint32_t)(val);
3279 EDX = (uint32_t)(val >> 32);
3280 }
3281 #endif
3282
3283 target_ulong helper_lsl(target_ulong selector1)
3284 {
3285 unsigned int limit;
3286 uint32_t e1, e2, eflags, selector;
3287 int rpl, dpl, cpl, type;
3288
3289 selector = selector1 & 0xffff;
3290 eflags = helper_cc_compute_all(CC_OP);
3291 if ((selector & 0xfffc) == 0)
3292 goto fail;
3293 if (load_segment(&e1, &e2, selector) != 0)
3294 goto fail;
3295 rpl = selector & 3;
3296 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3297 cpl = env->hflags & HF_CPL_MASK;
3298 if (e2 & DESC_S_MASK) {
3299 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3300 /* conforming */
3301 } else {
3302 if (dpl < cpl || dpl < rpl)
3303 goto fail;
3304 }
3305 } else {
3306 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3307 switch(type) {
3308 case 1:
3309 case 2:
3310 case 3:
3311 case 9:
3312 case 11:
3313 break;
3314 default:
3315 goto fail;
3316 }
3317 if (dpl < cpl || dpl < rpl) {
3318 fail:
3319 CC_SRC = eflags & ~CC_Z;
3320 return 0;
3321 }
3322 }
3323 limit = get_seg_limit(e1, e2);
3324 CC_SRC = eflags | CC_Z;
3325 return limit;
3326 }
3327
3328 target_ulong helper_lar(target_ulong selector1)
3329 {
3330 uint32_t e1, e2, eflags, selector;
3331 int rpl, dpl, cpl, type;
3332
3333 selector = selector1 & 0xffff;
3334 eflags = helper_cc_compute_all(CC_OP);
3335 if ((selector & 0xfffc) == 0)
3336 goto fail;
3337 if (load_segment(&e1, &e2, selector) != 0)
3338 goto fail;
3339 rpl = selector & 3;
3340 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3341 cpl = env->hflags & HF_CPL_MASK;
3342 if (e2 & DESC_S_MASK) {
3343 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3344 /* conforming */
3345 } else {
3346 if (dpl < cpl || dpl < rpl)
3347 goto fail;
3348 }
3349 } else {
3350 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3351 switch(type) {
3352 case 1:
3353 case 2:
3354 case 3:
3355 case 4:
3356 case 5:
3357 case 9:
3358 case 11:
3359 case 12:
3360 break;
3361 default:
3362 goto fail;
3363 }
3364 if (dpl < cpl || dpl < rpl) {
3365 fail:
3366 CC_SRC = eflags & ~CC_Z;
3367 return 0;
3368 }
3369 }
3370 CC_SRC = eflags | CC_Z;
3371 return e2 & 0x00f0ff00;
3372 }
3373
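/* The value returned by helper_lar() above, e2 & 0x00f0ff00, keeps exactly
 * the fields LAR reports: the access-rights byte (type, S, DPL, P) in bits
 * 8-15 and the AVL/L/D-B/G attribute bits in bits 20-23. */
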
3374 void helper_verr(target_ulong selector1)
3375 {
3376 uint32_t e1, e2, eflags, selector;
3377 int rpl, dpl, cpl;
3378
3379 selector = selector1 & 0xffff;
3380 eflags = helper_cc_compute_all(CC_OP);
3381 if ((selector & 0xfffc) == 0)
3382 goto fail;
3383 if (load_segment(&e1, &e2, selector) != 0)
3384 goto fail;
3385 if (!(e2 & DESC_S_MASK))
3386 goto fail;
3387 rpl = selector & 3;
3388 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3389 cpl = env->hflags & HF_CPL_MASK;
3390 if (e2 & DESC_CS_MASK) {
3391 if (!(e2 & DESC_R_MASK))
3392 goto fail;
3393 if (!(e2 & DESC_C_MASK)) {
3394 if (dpl < cpl || dpl < rpl)
3395 goto fail;
3396 }
3397 } else {
3398 if (dpl < cpl || dpl < rpl) {
3399 fail:
3400 CC_SRC = eflags & ~CC_Z;
3401 return;
3402 }
3403 }
3404 CC_SRC = eflags | CC_Z;
3405 }
3406
3407 void helper_verw(target_ulong selector1)
3408 {
3409 uint32_t e1, e2, eflags, selector;
3410 int rpl, dpl, cpl;
3411
3412 selector = selector1 & 0xffff;
3413 eflags = helper_cc_compute_all(CC_OP);
3414 if ((selector & 0xfffc) == 0)
3415 goto fail;
3416 if (load_segment(&e1, &e2, selector) != 0)
3417 goto fail;
3418 if (!(e2 & DESC_S_MASK))
3419 goto fail;
3420 rpl = selector & 3;
3421 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3422 cpl = env->hflags & HF_CPL_MASK;
3423 if (e2 & DESC_CS_MASK) {
3424 goto fail;
3425 } else {
3426 if (dpl < cpl || dpl < rpl)
3427 goto fail;
3428 if (!(e2 & DESC_W_MASK)) {
3429 fail:
3430 CC_SRC = eflags & ~CC_Z;
3431 return;
3432 }
3433 }
3434 CC_SRC = eflags | CC_Z;
3435 }
3436
3437 /* x87 FPU helpers */
3438
3439 static inline double floatx80_to_double(floatx80 a)
3440 {
3441 union {
3442 float64 f64;
3443 double d;
3444 } u;
3445
3446 u.f64 = floatx80_to_float64(a, &env->fp_status);
3447 return u.d;
3448 }
3449
3450 static inline floatx80 double_to_floatx80(double a)
3451 {
3452 union {
3453 float64 f64;
3454 double d;
3455 } u;
3456
3457 u.d = a;
3458 return float64_to_floatx80(u.f64, &env->fp_status);
3459 }
3460
3461 static void fpu_set_exception(int mask)
3462 {
3463 env->fpus |= mask;
3464 if (env->fpus & (~env->fpuc & FPUC_EM))
3465 env->fpus |= FPUS_SE | FPUS_B;
3466 }
3467
3468 static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
3469 {
3470 if (floatx80_is_zero(b)) {
3471 fpu_set_exception(FPUS_ZE);
3472 }
3473 return floatx80_div(a, b, &env->fp_status);
3474 }
3475
3476 static void fpu_raise_exception(void)
3477 {
3478 if (env->cr[0] & CR0_NE_MASK) {
3479 raise_exception(EXCP10_COPR);
3480 }
3481 #if !defined(CONFIG_USER_ONLY)
3482 else {
3483 cpu_set_ferr(env);
3484 }
3485 #endif
3486 }
3487
3488 void helper_flds_FT0(uint32_t val)
3489 {
3490 union {
3491 float32 f;
3492 uint32_t i;
3493 } u;
3494 u.i = val;
3495 FT0 = float32_to_floatx80(u.f, &env->fp_status);
3496 }
3497
3498 void helper_fldl_FT0(uint64_t val)
3499 {
3500 union {
3501 float64 f;
3502 uint64_t i;
3503 } u;
3504 u.i = val;
3505 FT0 = float64_to_floatx80(u.f, &env->fp_status);
3506 }
3507
3508 void helper_fildl_FT0(int32_t val)
3509 {
3510 FT0 = int32_to_floatx80(val, &env->fp_status);
3511 }
3512
3513 void helper_flds_ST0(uint32_t val)
3514 {
3515 int new_fpstt;
3516 union {
3517 float32 f;
3518 uint32_t i;
3519 } u;
3520 new_fpstt = (env->fpstt - 1) & 7;
3521 u.i = val;
3522 env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
3523 env->fpstt = new_fpstt;
3524 env->fptags[new_fpstt] = 0; /* validate stack entry */
3525 }
3526
3527 void helper_fldl_ST0(uint64_t val)
3528 {
3529 int new_fpstt;
3530 union {
3531 float64 f;
3532 uint64_t i;
3533 } u;
3534 new_fpstt = (env->fpstt - 1) & 7;
3535 u.i = val;
3536 env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
3537 env->fpstt = new_fpstt;
3538 env->fptags[new_fpstt] = 0; /* validate stack entry */
3539 }
3540
3541 void helper_fildl_ST0(int32_t val)
3542 {
3543 int new_fpstt;
3544 new_fpstt = (env->fpstt - 1) & 7;
3545 env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
3546 env->fpstt = new_fpstt;
3547 env->fptags[new_fpstt] = 0; /* validate stack entry */
3548 }
3549
3550 void helper_fildll_ST0(int64_t val)
3551 {
3552 int new_fpstt;
3553 new_fpstt = (env->fpstt - 1) & 7;
3554 env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
3555 env->fpstt = new_fpstt;
3556 env->fptags[new_fpstt] = 0; /* validate stack entry */
3557 }
3558
3559 uint32_t helper_fsts_ST0(void)
3560 {
3561 union {
3562 float32 f;
3563 uint32_t i;
3564 } u;
3565 u.f = floatx80_to_float32(ST0, &env->fp_status);
3566 return u.i;
3567 }
3568
3569 uint64_t helper_fstl_ST0(void)
3570 {
3571 union {
3572 float64 f;
3573 uint64_t i;
3574 } u;
3575 u.f = floatx80_to_float64(ST0, &env->fp_status);
3576 return u.i;
3577 }
3578
3579 int32_t helper_fist_ST0(void)
3580 {
3581 int32_t val;
3582 val = floatx80_to_int32(ST0, &env->fp_status);
3583 if (val != (int16_t)val)
3584 val = -32768;
3585 return val;
3586 }
3587
3588 int32_t helper_fistl_ST0(void)
3589 {
3590 int32_t val;
3591 val = floatx80_to_int32(ST0, &env->fp_status);
3592 return val;
3593 }
3594
3595 int64_t helper_fistll_ST0(void)
3596 {
3597 int64_t val;
3598 val = floatx80_to_int64(ST0, &env->fp_status);
3599 return val;
3600 }
3601
3602 int32_t helper_fistt_ST0(void)
3603 {
3604 int32_t val;
3605 val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
3606 if (val != (int16_t)val)
3607 val = -32768;
3608 return val;
3609 }
3610
3611 int32_t helper_fisttl_ST0(void)
3612 {
3613 int32_t val;
3614 val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
3615 return val;
3616 }
3617
3618 int64_t helper_fisttll_ST0(void)
3619 {
3620 int64_t val;
3621 val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
3622 return val;
3623 }
3624
3625 void helper_fldt_ST0(target_ulong ptr)
3626 {
3627 int new_fpstt;
3628 new_fpstt = (env->fpstt - 1) & 7;
3629 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3630 env->fpstt = new_fpstt;
3631 env->fptags[new_fpstt] = 0; /* validate stack entry */
3632 }
3633
3634 void helper_fstt_ST0(target_ulong ptr)
3635 {
3636 helper_fstt(ST0, ptr);
3637 }
3638
3639 void helper_fpush(void)
3640 {
3641 fpush();
3642 }
3643
3644 void helper_fpop(void)
3645 {
3646 fpop();
3647 }
3648
3649 void helper_fdecstp(void)
3650 {
3651 env->fpstt = (env->fpstt - 1) & 7;
3652 env->fpus &= (~0x4700);
3653 }
3654
3655 void helper_fincstp(void)
3656 {
3657 env->fpstt = (env->fpstt + 1) & 7;
3658 env->fpus &= (~0x4700);
3659 }
3660
3661 /* FPU move */
3662
3663 void helper_ffree_STN(int st_index)
3664 {
3665 env->fptags[(env->fpstt + st_index) & 7] = 1;
3666 }
3667
3668 void helper_fmov_ST0_FT0(void)
3669 {
3670 ST0 = FT0;
3671 }
3672
3673 void helper_fmov_FT0_STN(int st_index)
3674 {
3675 FT0 = ST(st_index);
3676 }
3677
3678 void helper_fmov_ST0_STN(int st_index)
3679 {
3680 ST0 = ST(st_index);
3681 }
3682
3683 void helper_fmov_STN_ST0(int st_index)
3684 {
3685 ST(st_index) = ST0;
3686 }
3687
3688 void helper_fxchg_ST0_STN(int st_index)
3689 {
3690 floatx80 tmp;
3691 tmp = ST(st_index);
3692 ST(st_index) = ST0;
3693 ST0 = tmp;
3694 }
3695
3696 /* FPU operations */
3697
3698 static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3699
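/* fcom_ccval[] maps the softfloat comparison result (-1 less, 0 equal,
 * 1 greater, 2 unordered, indexed as ret + 1) onto the FPU condition bits
 * C0 = 0x0100, C2 = 0x0400, C3 = 0x4000: less -> C0, equal -> C3,
 * greater -> none, unordered -> C0|C2|C3. The fcomi_ccval[] table further
 * below encodes the same relations in the EFLAGS CF/PF/ZF bits for FCOMI. */
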
3700 void helper_fcom_ST0_FT0(void)
3701 {
3702 int ret;
3703
3704 ret = floatx80_compare(ST0, FT0, &env->fp_status);
3705 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3706 }
3707
3708 void helper_fucom_ST0_FT0(void)
3709 {
3710 int ret;
3711
3712 ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
3713 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3714 }
3715
3716 static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3717
3718 void helper_fcomi_ST0_FT0(void)
3719 {
3720 int eflags;
3721 int ret;
3722
3723 ret = floatx80_compare(ST0, FT0, &env->fp_status);
3724 eflags = helper_cc_compute_all(CC_OP);
3725 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3726 CC_SRC = eflags;
3727 }
3728
3729 void helper_fucomi_ST0_FT0(void)
3730 {
3731 int eflags;
3732 int ret;
3733
3734 ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
3735 eflags = helper_cc_compute_all(CC_OP);
3736 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3737 CC_SRC = eflags;
3738 }
3739
3740 void helper_fadd_ST0_FT0(void)
3741 {
3742 ST0 = floatx80_add(ST0, FT0, &env->fp_status);
3743 }
3744
3745 void helper_fmul_ST0_FT0(void)
3746 {
3747 ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
3748 }
3749
3750 void helper_fsub_ST0_FT0(void)
3751 {
3752 ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
3753 }
3754
3755 void helper_fsubr_ST0_FT0(void)
3756 {
3757 ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
3758 }
3759
3760 void helper_fdiv_ST0_FT0(void)
3761 {
3762 ST0 = helper_fdiv(ST0, FT0);
3763 }
3764
3765 void helper_fdivr_ST0_FT0(void)
3766 {
3767 ST0 = helper_fdiv(FT0, ST0);
3768 }
3769
3770 /* fp operations between STN and ST0 */
3771
3772 void helper_fadd_STN_ST0(int st_index)
3773 {
3774 ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
3775 }
3776
3777 void helper_fmul_STN_ST0(int st_index)
3778 {
3779 ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
3780 }
3781
3782 void helper_fsub_STN_ST0(int st_index)
3783 {
3784 ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
3785 }
3786
3787 void helper_fsubr_STN_ST0(int st_index)
3788 {
3789 ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
3790 }
3791
3792 void helper_fdiv_STN_ST0(int st_index)
3793 {
3794 floatx80 *p;
3795 p = &ST(st_index);
3796 *p = helper_fdiv(*p, ST0);
3797 }
3798
3799 void helper_fdivr_STN_ST0(int st_index)
3800 {
3801 floatx80 *p;
3802 p = &ST(st_index);
3803 *p = helper_fdiv(ST0, *p);
3804 }
3805
3806 /* misc FPU operations */
3807 void helper_fchs_ST0(void)
3808 {
3809 ST0 = floatx80_chs(ST0);
3810 }
3811
3812 void helper_fabs_ST0(void)
3813 {
3814 ST0 = floatx80_abs(ST0);
3815 }
3816
3817 void helper_fld1_ST0(void)
3818 {
3819 ST0 = f15rk[1];
3820 }
3821
3822 void helper_fldl2t_ST0(void)
3823 {
3824 ST0 = f15rk[6];
3825 }
3826
3827 void helper_fldl2e_ST0(void)
3828 {
3829 ST0 = f15rk[5];
3830 }
3831
3832 void helper_fldpi_ST0(void)
3833 {
3834 ST0 = f15rk[2];
3835 }
3836
3837 void helper_fldlg2_ST0(void)
3838 {
3839 ST0 = f15rk[3];
3840 }
3841
3842 void helper_fldln2_ST0(void)
3843 {
3844 ST0 = f15rk[4];
3845 }
3846
3847 void helper_fldz_ST0(void)
3848 {
3849 ST0 = f15rk[0];
3850 }
3851
3852 void helper_fldz_FT0(void)
3853 {
3854 FT0 = f15rk[0];
3855 }
3856
3857 uint32_t helper_fnstsw(void)
3858 {
3859 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3860 }
3861
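/* helper_fnstsw() rebuilds the FPU status word: the TOP-of-stack field
 * lives in bits 13:11, hence the ~0x3800 mask and the (fpstt & 7) << 11
 * insertion. */
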
3862 uint32_t helper_fnstcw(void)
3863 {
3864 return env->fpuc;
3865 }
3866
3867 static void update_fp_status(void)
3868 {
3869 int rnd_type;
3870
3871 /* set rounding mode */
3872 switch(env->fpuc & RC_MASK) {
3873 default:
3874 case RC_NEAR:
3875 rnd_type = float_round_nearest_even;
3876 break;
3877 case RC_DOWN:
3878 rnd_type = float_round_down;
3879 break;
3880 case RC_UP:
3881 rnd_type = float_round_up;
3882 break;
3883 case RC_CHOP:
3884 rnd_type = float_round_to_zero;
3885 break;
3886 }
3887 set_float_rounding_mode(rnd_type, &env->fp_status);
3888 switch((env->fpuc >> 8) & 3) {
3889 case 0:
3890 rnd_type = 32;
3891 break;
3892 case 2:
3893 rnd_type = 64;
3894 break;
3895 case 3:
3896 default:
3897 rnd_type = 80;
3898 break;
3899 }
3900 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3901 }
3902
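/* update_fp_status() decodes two FPU control word fields: RC (bits 11:10,
 * selected by RC_MASK) picks the softfloat rounding mode, and PC (bits 9:8)
 * picks the rounding precision: 0 -> 32-bit, 2 -> 64-bit, 3 -> 80-bit
 * (the reserved value 1 falls through to 80-bit here). */
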
3903 void helper_fldcw(uint32_t val)
3904 {
3905 env->fpuc = val;
3906 update_fp_status();
3907 }
3908
3909 void helper_fclex(void)
3910 {
3911 env->fpus &= 0x7f00;
3912 }
3913
3914 void helper_fwait(void)
3915 {
3916 if (env->fpus & FPUS_SE)
3917 fpu_raise_exception();
3918 }
3919
3920 void helper_fninit(void)
3921 {
3922 env->fpus = 0;
3923 env->fpstt = 0;
3924 env->fpuc = 0x37f;
3925 env->fptags[0] = 1;
3926 env->fptags[1] = 1;
3927 env->fptags[2] = 1;
3928 env->fptags[3] = 1;
3929 env->fptags[4] = 1;
3930 env->fptags[5] = 1;
3931 env->fptags[6] = 1;
3932 env->fptags[7] = 1;
3933 }
3934
3935 /* BCD ops */
3936
3937 void helper_fbld_ST0(target_ulong ptr)
3938 {
3939 floatx80 tmp;
3940 uint64_t val;
3941 unsigned int v;
3942 int i;
3943
3944 val = 0;
3945 for(i = 8; i >= 0; i--) {
3946 v = ldub(ptr + i);
3947 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3948 }
3949 tmp = int64_to_floatx80(val, &env->fp_status);
3950 if (ldub(ptr + 9) & 0x80) {
3951 tmp = floatx80_chs(tmp); /* negate when the sign byte is set */
3952 }
3953 fpush();
3954 ST0 = tmp;
3955 }
3956
3957 void helper_fbst_ST0(target_ulong ptr)
3958 {
3959 int v;
3960 target_ulong mem_ref, mem_end;
3961 int64_t val;
3962
3963 val = floatx80_to_int64(ST0, &env->fp_status);
3964 mem_ref = ptr;
3965 mem_end = mem_ref + 9;
3966 if (val < 0) {
3967 stb(mem_end, 0x80);
3968 val = -val;
3969 } else {
3970 stb(mem_end, 0x00);
3971 }
3972 while (mem_ref < mem_end) {
3973 if (val == 0)
3974 break;
3975 v = val % 100;
3976 val = val / 100;
3977 v = ((v / 10) << 4) | (v % 10);
3978 stb(mem_ref++, v);
3979 }
3980 while (mem_ref < mem_end) {
3981 stb(mem_ref++, 0);
3982 }
3983 }
3984
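/* Illustrative FBSTP encoding produced by helper_fbst_ST0(): storing the
 * value 1234 writes the packed-BCD bytes 0x34, 0x12, 0x00, ... at ptr and
 * the sign byte 0x00 (or 0x80 for a negative value) at ptr + 9; two decimal
 * digits are packed per byte, least significant pair first. */
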
3985 void helper_f2xm1(void)
3986 {
3987 double val = floatx80_to_double(ST0);
3988 val = pow(2.0, val) - 1.0;
3989 ST0 = double_to_floatx80(val);
3990 }
3991
3992 void helper_fyl2x(void)
3993 {
3994 double fptemp = floatx80_to_double(ST0);
3995
3996 if (fptemp>0.0){
3997 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3998 fptemp *= floatx80_to_double(ST1);
3999 ST1 = double_to_floatx80(fptemp);
4000 fpop();
4001 } else {
4002 env->fpus &= (~0x4700);
4003 env->fpus |= 0x400;
4004 }
4005 }
4006
4007 void helper_fptan(void)
4008 {
4009 double fptemp = floatx80_to_double(ST0);
4010
4011 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4012 env->fpus |= 0x400;
4013 } else {
4014 fptemp = tan(fptemp);
4015 ST0 = double_to_floatx80(fptemp);
4016 fpush();
4017 ST0 = floatx80_one;
4018 env->fpus &= (~0x400); /* C2 <-- 0 */
4019 /* the above code is for |arg| < 2**52 only */
4020 }
4021 }
4022
4023 void helper_fpatan(void)
4024 {
4025 double fptemp, fpsrcop;
4026
4027 fpsrcop = floatx80_to_double(ST1);
4028 fptemp = floatx80_to_double(ST0);
4029 ST1 = double_to_floatx80(atan2(fpsrcop, fptemp));
4030 fpop();
4031 }
4032
4033 void helper_fxtract(void)
4034 {
4035 CPU_LDoubleU temp;
4036
4037 temp.d = ST0;
4038
4039 if (floatx80_is_zero(ST0)) {
4040 /* Easy way to generate -inf and raise a division-by-zero exception */
4041 ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero, &env->fp_status);
4042 fpush();
4043 ST0 = temp.d;
4044 } else {
4045 int expdif;
4046
4047 expdif = EXPD(temp) - EXPBIAS;
4048 /*DP exponent bias*/
4049 ST0 = int32_to_floatx80(expdif, &env->fp_status);
4050 fpush();
4051 BIASEXPONENT(temp);
4052 ST0 = temp.d;
4053 }
4054 }
4055
4056 void helper_fprem1(void)
4057 {
4058 double st0, st1, dblq, fpsrcop, fptemp;
4059 CPU_LDoubleU fpsrcop1, fptemp1;
4060 int expdif;
4061 signed long long int q;
4062
4063 st0 = floatx80_to_double(ST0);
4064 st1 = floatx80_to_double(ST1);
4065
4066 if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
4067 ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
4068 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4069 return;
4070 }
4071
4072 fpsrcop = st0;
4073 fptemp = st1;
4074 fpsrcop1.d = ST0;
4075 fptemp1.d = ST1;
4076 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4077
4078 if (expdif < 0) {
4079 /* optimisation? taken from the AMD docs */
4080 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4081 /* ST0 is unchanged */
4082 return;
4083 }
4084
4085 if (expdif < 53) {
4086 dblq = fpsrcop / fptemp;
4087 /* round dblq towards nearest integer */
4088 dblq = rint(dblq);
4089 st0 = fpsrcop - fptemp * dblq;
4090
4091 /* convert dblq to q by truncating towards zero */
4092 if (dblq < 0.0)
4093 q = (signed long long int)(-dblq);
4094 else
4095 q = (signed long long int)dblq;
4096
4097 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4098 /* (C0,C3,C1) <-- (q2,q1,q0) */
4099 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4100 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4101 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4102 } else {
4103 env->fpus |= 0x400; /* C2 <-- 1 */
4104 fptemp = pow(2.0, expdif - 50);
4105 fpsrcop = (st0 / st1) / fptemp;
4106 /* fpsrcop = integer obtained by chopping */
4107 fpsrcop = (fpsrcop < 0.0) ?
4108 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4109 st0 -= (st1 * fpsrcop * fptemp);
4110 }
4111 ST0 = double_to_floatx80(st0);
4112 }
4113
4114 void helper_fprem(void)
4115 {
4116 double st0, st1, dblq, fpsrcop, fptemp;
4117 CPU_LDoubleU fpsrcop1, fptemp1;
4118 int expdif;
4119 signed long long int q;
4120
4121 st0 = floatx80_to_double(ST0);
4122 st1 = floatx80_to_double(ST1);
4123
4124 if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
4125 ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
4126 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4127 return;
4128 }
4129
4130 fpsrcop = st0;
4131 fptemp = st1;
4132 fpsrcop1.d = ST0;
4133 fptemp1.d = ST1;
4134 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4135
4136 if (expdif < 0) {
4137 /* optimisation? taken from the AMD docs */
4138 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4139 /* ST0 is unchanged */
4140 return;
4141 }
4142
4143 if ( expdif < 53 ) {
4144 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4145 /* round dblq towards zero */
4146 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4147 st0 = fpsrcop/*ST0*/ - fptemp * dblq;
4148
4149 /* convert dblq to q by truncating towards zero */
4150 if (dblq < 0.0)
4151 q = (signed long long int)(-dblq);
4152 else
4153 q = (signed long long int)dblq;
4154
4155 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4156 /* (C0,C3,C1) <-- (q2,q1,q0) */
4157 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4158 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4159 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4160 } else {
4161 int N = 32 + (expdif % 32); /* as per AMD docs */
4162 env->fpus |= 0x400; /* C2 <-- 1 */
4163 fptemp = pow(2.0, (double)(expdif - N));
4164 fpsrcop = (st0 / st1) / fptemp;
4165 /* fpsrcop = integer obtained by chopping */
4166 fpsrcop = (fpsrcop < 0.0) ?
4167 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4168 st0 -= (st1 * fpsrcop * fptemp);
4169 }
4170 ST0 = double_to_floatx80(st0);
4171 }
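
/* Illustrative example: both FPREM and FPREM1 report the low three bits of
 * the chopped quotient as (C0,C3,C1) = (q2,q1,q0) when the reduction
 * completes (expdif < 53).  For ST0 = 17.0 and ST1 = 5.0 the quotient is 3,
 * so C3 and C1 are set.  A hypothetical sketch of the flag packing
 * (example_fprem_quotient_bits is not a real helper in this file): */
#if 0
static int example_fprem_quotient_bits(int q)
{
    int fpus = 0;
    fpus |= (q & 0x4) << (8 - 2);    /* C0 <-- q2 */
    fpus |= (q & 0x2) << (14 - 1);   /* C3 <-- q1 */
    fpus |= (q & 0x1) << (9 - 0);    /* C1 <-- q0 */
    return fpus;                     /* q = 3 -> 0x4200 (C3|C1) */
}
#endif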
4172
4173 void helper_fyl2xp1(void)
4174 {
4175 double fptemp = floatx80_to_double(ST0);
4176
4177 if ((fptemp+1.0)>0.0) {
4178 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4179 fptemp *= floatx80_to_double(ST1);
4180 ST1 = double_to_floatx80(fptemp);
4181 fpop();
4182 } else {
4183 env->fpus &= (~0x4700);
4184 env->fpus |= 0x400;
4185 }
4186 }
4187
4188 void helper_fsqrt(void)
4189 {
4190 if (floatx80_is_neg(ST0)) {
4191 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4192 env->fpus |= 0x400;
4193 }
4194 ST0 = floatx80_sqrt(ST0, &env->fp_status);
4195 }
4196
4197 void helper_fsincos(void)
4198 {
4199 double fptemp = floatx80_to_double(ST0);
4200
4201 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4202 env->fpus |= 0x400;
4203 } else {
4204 ST0 = double_to_floatx80(sin(fptemp));
4205 fpush();
4206 ST0 = double_to_floatx80(cos(fptemp));
4207 env->fpus &= (~0x400); /* C2 <-- 0 */
4208 /* the above code is for |arg| < 2**63 only */
4209 }
4210 }
4211
4212 void helper_frndint(void)
4213 {
4214 ST0 = floatx80_round_to_int(ST0, &env->fp_status);
4215 }
4216
4217 void helper_fscale(void)
4218 {
4219 if (floatx80_is_any_nan(ST1)) {
4220 ST0 = ST1;
4221 } else {
4222 int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
4223 ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
4224 }
4225 }
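
/* Illustrative note: FSCALE multiplies ST0 by 2 raised to the truncated
 * value of ST1; e.g. ST0 = 3.0 and ST1 = 2.5 give n = 2 and a result of
 * 12.0, the same as ldexp(3.0, 2) in C. */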
4226
4227 void helper_fsin(void)
4228 {
4229 double fptemp = floatx80_to_double(ST0);
4230
4231 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4232 env->fpus |= 0x400;
4233 } else {
4234 ST0 = double_to_floatx80(sin(fptemp));
4235 env->fpus &= (~0x400); /* C2 <-- 0 */
4236 /* the above code is for |arg| < 2**53 only */
4237 }
4238 }
4239
4240 void helper_fcos(void)
4241 {
4242 double fptemp = floatx80_to_double(ST0);
4243
4244 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4245 env->fpus |= 0x400;
4246 } else {
4247 ST0 = double_to_floatx80(cos(fptemp));
4248 env->fpus &= (~0x400); /* C2 <-- 0 */
4249 /* the above code is for |arg| < 2**63 only */
4250 }
4251 }
4252
4253 void helper_fxam_ST0(void)
4254 {
4255 CPU_LDoubleU temp;
4256 int expdif;
4257
4258 temp.d = ST0;
4259
4260 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4261 if (SIGND(temp))
4262 env->fpus |= 0x200; /* C1 <-- 1 */
4263
4264 /* XXX: test fptags too */
4265 expdif = EXPD(temp);
4266 if (expdif == MAXEXPD) {
4267 if (MANTD(temp) == 0x8000000000000000ULL)
4268 env->fpus |= 0x500 /*Infinity*/;
4269 else
4270 env->fpus |= 0x100 /*NaN*/;
4271 } else if (expdif == 0) {
4272 if (MANTD(temp) == 0)
4273 env->fpus |= 0x4000 /*Zero*/;
4274 else
4275 env->fpus |= 0x4400 /*Denormal*/;
4276 } else {
4277 env->fpus |= 0x400;
4278 }
4279 }
4280
4281 void helper_fstenv(target_ulong ptr, int data32)
4282 {
4283 int fpus, fptag, exp, i;
4284 uint64_t mant;
4285 CPU_LDoubleU tmp;
4286
4287 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4288 fptag = 0;
4289 for (i=7; i>=0; i--) {
4290 fptag <<= 2;
4291 if (env->fptags[i]) {
4292 fptag |= 3;
4293 } else {
4294 tmp.d = env->fpregs[i].d;
4295 exp = EXPD(tmp);
4296 mant = MANTD(tmp);
4297 if (exp == 0 && mant == 0) {
4298 /* zero */
4299 fptag |= 1;
4300 } else if (exp == 0 || exp == MAXEXPD
4301 || (mant & (1LL << 63)) == 0
4302 ) {
4303 /* NaNs, infinity, denormal */
4304 fptag |= 2;
4305 }
4306 }
4307 }
4308 if (data32) {
4309 /* 32 bit */
4310 stl(ptr, env->fpuc);
4311 stl(ptr + 4, fpus);
4312 stl(ptr + 8, fptag);
4313 stl(ptr + 12, 0); /* fpip */
4314 stl(ptr + 16, 0); /* fpcs */
4315 stl(ptr + 20, 0); /* fpoo */
4316 stl(ptr + 24, 0); /* fpos */
4317 } else {
4318 /* 16 bit */
4319 stw(ptr, env->fpuc);
4320 stw(ptr + 2, fpus);
4321 stw(ptr + 4, fptag);
4322 stw(ptr + 6, 0);
4323 stw(ptr + 8, 0);
4324 stw(ptr + 10, 0);
4325 stw(ptr + 12, 0);
4326 }
4327 }
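
/* Illustrative note: the loop above builds the x87 tag word with two bits
 * per register: 00 = valid, 01 = zero, 10 = special (NaN, infinity,
 * denormal), 11 = empty.  A fully empty stack therefore stores a tag word
 * of 0xffff, and the TOP field is merged into bits 11-13 of the saved
 * status word. */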
4328
4329 void helper_fldenv(target_ulong ptr, int data32)
4330 {
4331 int i, fpus, fptag;
4332
4333 if (data32) {
4334 env->fpuc = lduw(ptr);
4335 fpus = lduw(ptr + 4);
4336 fptag = lduw(ptr + 8);
4337 }
4338 else {
4339 env->fpuc = lduw(ptr);
4340 fpus = lduw(ptr + 2);
4341 fptag = lduw(ptr + 4);
4342 }
4343 env->fpstt = (fpus >> 11) & 7;
4344 env->fpus = fpus & ~0x3800;
4345 for(i = 0;i < 8; i++) {
4346 env->fptags[i] = ((fptag & 3) == 3);
4347 fptag >>= 2;
4348 }
4349 }
4350
4351 void helper_fsave(target_ulong ptr, int data32)
4352 {
4353 floatx80 tmp;
4354 int i;
4355
4356 helper_fstenv(ptr, data32);
4357
4358 ptr += (14 << data32);
4359 for(i = 0;i < 8; i++) {
4360 tmp = ST(i);
4361 helper_fstt(tmp, ptr);
4362 ptr += 10;
4363 }
4364
4365 /* fninit */
4366 env->fpus = 0;
4367 env->fpstt = 0;
4368 env->fpuc = 0x37f;
4369 env->fptags[0] = 1;
4370 env->fptags[1] = 1;
4371 env->fptags[2] = 1;
4372 env->fptags[3] = 1;
4373 env->fptags[4] = 1;
4374 env->fptags[5] = 1;
4375 env->fptags[6] = 1;
4376 env->fptags[7] = 1;
4377 }
4378
4379 void helper_frstor(target_ulong ptr, int data32)
4380 {
4381 floatx80 tmp;
4382 int i;
4383
4384 helper_fldenv(ptr, data32);
4385 ptr += (14 << data32);
4386
4387 for(i = 0;i < 8; i++) {
4388 tmp = helper_fldt(ptr);
4389 ST(i) = tmp;
4390 ptr += 10;
4391 }
4392 }
4393
4394 void helper_fxsave(target_ulong ptr, int data64)
4395 {
4396 int fpus, fptag, i, nb_xmm_regs;
4397 floatx80 tmp;
4398 target_ulong addr;
4399
4400 /* The operand must be 16 byte aligned */
4401 if (ptr & 0xf) {
4402 raise_exception(EXCP0D_GPF);
4403 }
4404
4405 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4406 fptag = 0;
4407 for(i = 0; i < 8; i++) {
4408 fptag |= (env->fptags[i] << i);
4409 }
4410 stw(ptr, env->fpuc);
4411 stw(ptr + 2, fpus);
4412 stw(ptr + 4, fptag ^ 0xff);
4413 #ifdef TARGET_X86_64
4414 if (data64) {
4415 stq(ptr + 0x08, 0); /* rip */
4416 stq(ptr + 0x10, 0); /* rdp */
4417 } else
4418 #endif
4419 {
4420 stl(ptr + 0x08, 0); /* eip */
4421 stl(ptr + 0x0c, 0); /* sel */
4422 stl(ptr + 0x10, 0); /* dp */
4423 stl(ptr + 0x14, 0); /* sel */
4424 }
4425
4426 addr = ptr + 0x20;
4427 for(i = 0;i < 8; i++) {
4428 tmp = ST(i);
4429 helper_fstt(tmp, addr);
4430 addr += 16;
4431 }
4432
4433 if (env->cr[4] & CR4_OSFXSR_MASK) {
4434 /* XXX: finish it */
4435 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4436 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4437 if (env->hflags & HF_CS64_MASK)
4438 nb_xmm_regs = 16;
4439 else
4440 nb_xmm_regs = 8;
4441 addr = ptr + 0xa0;
4442 /* Fast FXSAVE leaves out the XMM registers */
4443 if (!(env->efer & MSR_EFER_FFXSR)
4444 || (env->hflags & HF_CPL_MASK)
4445 || !(env->hflags & HF_LMA_MASK)) {
4446 for(i = 0; i < nb_xmm_regs; i++) {
4447 stq(addr, env->xmm_regs[i].XMM_Q(0));
4448 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4449 addr += 16;
4450 }
4451 }
4452 }
4453 }
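
/* Illustrative note on the FXSAVE image laid out above: FCW is stored at
 * offset 0, FSW at 2, the abridged tag word at 4 (one bit per register,
 * 1 = not empty, hence the ^ 0xff), MXCSR at 0x18, the ST/MM registers in
 * 16-byte slots starting at 0x20 and the XMM registers starting at 0xa0. */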
4454
4455 void helper_fxrstor(target_ulong ptr, int data64)
4456 {
4457 int i, fpus, fptag, nb_xmm_regs;
4458 floatx80 tmp;
4459 target_ulong addr;
4460
4461 /* The operand must be 16 byte aligned */
4462 if (ptr & 0xf) {
4463 raise_exception(EXCP0D_GPF);
4464 }
4465
4466 env->fpuc = lduw(ptr);
4467 fpus = lduw(ptr + 2);
4468 fptag = lduw(ptr + 4);
4469 env->fpstt = (fpus >> 11) & 7;
4470 env->fpus = fpus & ~0x3800;
4471 fptag ^= 0xff;
4472 for(i = 0;i < 8; i++) {
4473 env->fptags[i] = ((fptag >> i) & 1);
4474 }
4475
4476 addr = ptr + 0x20;
4477 for(i = 0;i < 8; i++) {
4478 tmp = helper_fldt(addr);
4479 ST(i) = tmp;
4480 addr += 16;
4481 }
4482
4483 if (env->cr[4] & CR4_OSFXSR_MASK) {
4484 /* XXX: finish it */
4485 env->mxcsr = ldl(ptr + 0x18);
4486 //ldl(ptr + 0x1c);
4487 if (env->hflags & HF_CS64_MASK)
4488 nb_xmm_regs = 16;
4489 else
4490 nb_xmm_regs = 8;
4491 addr = ptr + 0xa0;
4492 /* Fast FXRSTOR leaves out the XMM registers */
4493 if (!(env->efer & MSR_EFER_FFXSR)
4494 || (env->hflags & HF_CPL_MASK)
4495 || !(env->hflags & HF_LMA_MASK)) {
4496 for(i = 0; i < nb_xmm_regs; i++) {
4497 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4498 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4499 addr += 16;
4500 }
4501 }
4502 }
4503 }
4504
4505 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
4506 {
4507 CPU_LDoubleU temp;
4508
4509 temp.d = f;
4510 *pmant = temp.l.lower;
4511 *pexp = temp.l.upper;
4512 }
4513
4514 floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
4515 {
4516 CPU_LDoubleU temp;
4517
4518 temp.l.upper = upper;
4519 temp.l.lower = mant;
4520 return temp.d;
4521 }
4522
4523 #ifdef TARGET_X86_64
4524
4525 //#define DEBUG_MULDIV
4526
4527 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4528 {
4529 *plow += a;
4530 /* carry test */
4531 if (*plow < a)
4532 (*phigh)++;
4533 *phigh += b;
4534 }
4535
4536 static void neg128(uint64_t *plow, uint64_t *phigh)
4537 {
4538 *plow = ~ *plow;
4539 *phigh = ~ *phigh;
4540 add128(plow, phigh, 1, 0);
4541 }
4542
4543 /* return TRUE if overflow */
4544 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4545 {
4546 uint64_t q, r, a1, a0;
4547 int i, qb, ab;
4548
4549 a0 = *plow;
4550 a1 = *phigh;
4551 if (a1 == 0) {
4552 q = a0 / b;
4553 r = a0 % b;
4554 *plow = q;
4555 *phigh = r;
4556 } else {
4557 if (a1 >= b)
4558 return 1;
4559 /* XXX: use a better algorithm */
4560 for(i = 0; i < 64; i++) {
4561 ab = a1 >> 63;
4562 a1 = (a1 << 1) | (a0 >> 63);
4563 if (ab || a1 >= b) {
4564 a1 -= b;
4565 qb = 1;
4566 } else {
4567 qb = 0;
4568 }
4569 a0 = (a0 << 1) | qb;
4570 }
4571 #if defined(DEBUG_MULDIV)
4572 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4573 *phigh, *plow, b, a0, a1);
4574 #endif
4575 *plow = a0;
4576 *phigh = a1;
4577 }
4578 return 0;
4579 }
4580
4581 /* return TRUE if overflow */
4582 static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4583 {
4584 int sa, sb;
4585 sa = ((int64_t)*phigh < 0);
4586 if (sa)
4587 neg128(plow, phigh);
4588 sb = (b < 0);
4589 if (sb)
4590 b = -b;
4591 if (div64(plow, phigh, b) != 0)
4592 return 1;
4593 if (sa ^ sb) {
4594 if (*plow > (1ULL << 63))
4595 return 1;
4596 *plow = - *plow;
4597 } else {
4598 if (*plow >= (1ULL << 63))
4599 return 1;
4600 }
4601 if (sa)
4602 *phigh = - *phigh;
4603 return 0;
4604 }
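
/* Illustrative example: div64() performs a 128-by-64-bit unsigned division
 * one bit at a time and returns non-zero when the quotient does not fit in
 * 64 bits; idiv64() wraps it with sign handling.  A hypothetical usage
 * sketch (example_div64_usage is not a real helper in this file): */
#if 0
static void example_div64_usage(void)
{
    uint64_t lo = 0, hi = 1;            /* dividend = 2^64 */
    if (!div64(&lo, &hi, 3)) {
        /* lo == 0x5555555555555555 (quotient), hi == 1 (remainder) */
    }
}
#endif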
4605
4606 void helper_mulq_EAX_T0(target_ulong t0)
4607 {
4608 uint64_t r0, r1;
4609
4610 mulu64(&r0, &r1, EAX, t0);
4611 EAX = r0;
4612 EDX = r1;
4613 CC_DST = r0;
4614 CC_SRC = r1;
4615 }
4616
4617 void helper_imulq_EAX_T0(target_ulong t0)
4618 {
4619 uint64_t r0, r1;
4620
4621 muls64(&r0, &r1, EAX, t0);
4622 EAX = r0;
4623 EDX = r1;
4624 CC_DST = r0;
4625 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4626 }
4627
4628 target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4629 {
4630 uint64_t r0, r1;
4631
4632 muls64(&r0, &r1, t0, t1);
4633 CC_DST = r0;
4634 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4635 return r0;
4636 }
4637
4638 void helper_divq_EAX(target_ulong t0)
4639 {
4640 uint64_t r0, r1;
4641 if (t0 == 0) {
4642 raise_exception(EXCP00_DIVZ);
4643 }
4644 r0 = EAX;
4645 r1 = EDX;
4646 if (div64(&r0, &r1, t0))
4647 raise_exception(EXCP00_DIVZ);
4648 EAX = r0;
4649 EDX = r1;
4650 }
4651
4652 void helper_idivq_EAX(target_ulong t0)
4653 {
4654 uint64_t r0, r1;
4655 if (t0 == 0) {
4656 raise_exception(EXCP00_DIVZ);
4657 }
4658 r0 = EAX;
4659 r1 = EDX;
4660 if (idiv64(&r0, &r1, t0))
4661 raise_exception(EXCP00_DIVZ);
4662 EAX = r0;
4663 EDX = r1;
4664 }
4665 #endif
4666
4667 static void do_hlt(void)
4668 {
4669 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4670 env->halted = 1;
4671 env->exception_index = EXCP_HLT;
4672 cpu_loop_exit();
4673 }
4674
4675 void helper_hlt(int next_eip_addend)
4676 {
4677 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4678 EIP += next_eip_addend;
4679
4680 do_hlt();
4681 }
4682
4683 void helper_monitor(target_ulong ptr)
4684 {
4685 if ((uint32_t)ECX != 0)
4686 raise_exception(EXCP0D_GPF);
4687 /* XXX: store address ? */
4688 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4689 }
4690
4691 void helper_mwait(int next_eip_addend)
4692 {
4693 if ((uint32_t)ECX != 0)
4694 raise_exception(EXCP0D_GPF);
4695 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4696 EIP += next_eip_addend;
4697
4698 /* XXX: not complete but not completely erroneous */
4699 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4700 /* more than one CPU: do not sleep because another CPU may
4701 wake this one */
4702 } else {
4703 do_hlt();
4704 }
4705 }
4706
4707 void helper_debug(void)
4708 {
4709 env->exception_index = EXCP_DEBUG;
4710 cpu_loop_exit();
4711 }
4712
4713 void helper_reset_rf(void)
4714 {
4715 env->eflags &= ~RF_MASK;
4716 }
4717
4718 void helper_raise_interrupt(int intno, int next_eip_addend)
4719 {
4720 raise_interrupt(intno, 1, 0, next_eip_addend);
4721 }
4722
4723 void helper_raise_exception(int exception_index)
4724 {
4725 raise_exception(exception_index);
4726 }
4727
4728 void helper_cli(void)
4729 {
4730 env->eflags &= ~IF_MASK;
4731 }
4732
4733 void helper_sti(void)
4734 {
4735 env->eflags |= IF_MASK;
4736 }
4737
4738 #if 0
4739 /* vm86plus instructions */
4740 void helper_cli_vm(void)
4741 {
4742 env->eflags &= ~VIF_MASK;
4743 }
4744
4745 void helper_sti_vm(void)
4746 {
4747 env->eflags |= VIF_MASK;
4748 if (env->eflags & VIP_MASK) {
4749 raise_exception(EXCP0D_GPF);
4750 }
4751 }
4752 #endif
4753
4754 void helper_set_inhibit_irq(void)
4755 {
4756 env->hflags |= HF_INHIBIT_IRQ_MASK;
4757 }
4758
4759 void helper_reset_inhibit_irq(void)
4760 {
4761 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4762 }
4763
4764 void helper_boundw(target_ulong a0, int v)
4765 {
4766 int low, high;
4767 low = ldsw(a0);
4768 high = ldsw(a0 + 2);
4769 v = (int16_t)v;
4770 if (v < low || v > high) {
4771 raise_exception(EXCP05_BOUND);
4772 }
4773 }
4774
4775 void helper_boundl(target_ulong a0, int v)
4776 {
4777 int low, high;
4778 low = ldl(a0);
4779 high = ldl(a0 + 4);
4780 if (v < low || v > high) {
4781 raise_exception(EXCP05_BOUND);
4782 }
4783 }
4784
4785 #if !defined(CONFIG_USER_ONLY)
4786
4787 #define MMUSUFFIX _mmu
4788
4789 #define SHIFT 0
4790 #include "softmmu_template.h"
4791
4792 #define SHIFT 1
4793 #include "softmmu_template.h"
4794
4795 #define SHIFT 2
4796 #include "softmmu_template.h"
4797
4798 #define SHIFT 3
4799 #include "softmmu_template.h"
4800
4801 #endif
4802
4803 #if !defined(CONFIG_USER_ONLY)
4804 /* try to fill the TLB and raise an exception on error. If retaddr is
4805 NULL, it means that the function was called in C code (i.e. not
4806 from generated code or from helper.c) */
4807 /* XXX: fix it to restore all registers */
4808 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4809 {
4810 TranslationBlock *tb;
4811 int ret;
4812 unsigned long pc;
4813 CPUX86State *saved_env;
4814
4815 /* XXX: hack to restore env in all cases, even if not called from
4816 generated code */
4817 saved_env = env;
4818 env = cpu_single_env;
4819
4820 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4821 if (ret) {
4822 if (retaddr) {
4823 /* now we have a real cpu fault */
4824 pc = (unsigned long)retaddr;
4825 tb = tb_find_pc(pc);
4826 if (tb) {
4827 /* the PC is inside the translated code. It means that we have
4828 a virtual CPU fault */
4829 cpu_restore_state(tb, env, pc);
4830 }
4831 }
4832 raise_exception_err(env->exception_index, env->error_code);
4833 }
4834 env = saved_env;
4835 }
4836 #endif
4837
4838 /* Secure Virtual Machine helpers */
4839
4840 #if defined(CONFIG_USER_ONLY)
4841
4842 void helper_vmrun(int aflag, int next_eip_addend)
4843 {
4844 }
4845 void helper_vmmcall(void)
4846 {
4847 }
4848 void helper_vmload(int aflag)
4849 {
4850 }
4851 void helper_vmsave(int aflag)
4852 {
4853 }
4854 void helper_stgi(void)
4855 {
4856 }
4857 void helper_clgi(void)
4858 {
4859 }
4860 void helper_skinit(void)
4861 {
4862 }
4863 void helper_invlpga(int aflag)
4864 {
4865 }
4866 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4867 {
4868 }
4869 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4870 {
4871 }
4872
4873 void helper_svm_check_io(uint32_t port, uint32_t param,
4874 uint32_t next_eip_addend)
4875 {
4876 }
4877 #else
4878
4879 static inline void svm_save_seg(target_phys_addr_t addr,
4880 const SegmentCache *sc)
4881 {
4882 stw_phys(addr + offsetof(struct vmcb_seg, selector),
4883 sc->selector);
4884 stq_phys(addr + offsetof(struct vmcb_seg, base),
4885 sc->base);
4886 stl_phys(addr + offsetof(struct vmcb_seg, limit),
4887 sc->limit);
4888 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
4889 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4890 }
4891
4892 static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4893 {
4894 unsigned int flags;
4895
4896 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4897 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4898 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4899 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4900 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4901 }
4902
4903 static inline void svm_load_seg_cache(target_phys_addr_t addr,
4904 CPUState *env, int seg_reg)
4905 {
4906 SegmentCache sc1, *sc = &sc1;
4907 svm_load_seg(addr, sc);
4908 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4909 sc->base, sc->limit, sc->flags);
4910 }
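
/* Illustrative note: the VMCB stores segment attributes in a 12-bit
 * compressed form, so svm_save_seg() and svm_load_seg() map descriptor
 * flags bits 8-15 to attrib bits 0-7 and flags bits 20-23 to attrib bits
 * 8-11.  For example, flags 0x00c09300 (a flat writable data segment)
 * round-trips through attrib 0x0c93. */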
4911
4912 void helper_vmrun(int aflag, int next_eip_addend)
4913 {
4914 target_ulong addr;
4915 uint32_t event_inj;
4916 uint32_t int_ctl;
4917
4918 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4919
4920 if (aflag == 2)
4921 addr = EAX;
4922 else
4923 addr = (uint32_t)EAX;
4924
4925 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4926
4927 env->vm_vmcb = addr;
4928
4929 /* save the current CPU state in the hsave page */
4930 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4931 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4932
4933 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4934 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4935
4936 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4937 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4938 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4939 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4940 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4941 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4942
4943 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4944 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4945
4946 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4947 &env->segs[R_ES]);
4948 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4949 &env->segs[R_CS]);
4950 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4951 &env->segs[R_SS]);
4952 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4953 &env->segs[R_DS]);
4954
4955 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4956 EIP + next_eip_addend);
4957 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4958 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4959
4960 /* load the interception bitmaps so we do not need to access the
4961 vmcb in svm mode */
4962 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4963 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4964 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4965 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4966 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4967 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4968
4969 /* enable intercepts */
4970 env->hflags |= HF_SVMI_MASK;
4971
4972 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4973
4974 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4975 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4976
4977 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4978 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4979
4980 /* clear exit_info_2 so we behave like the real hardware */
4981 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4982
4983 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4984 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4985 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4986 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4987 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4988 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4989 if (int_ctl & V_INTR_MASKING_MASK) {
4990 env->v_tpr = int_ctl & V_TPR_MASK;
4991 env->hflags2 |= HF2_VINTR_MASK;
4992 if (env->eflags & IF_MASK)
4993 env->hflags2 |= HF2_HIF_MASK;
4994 }
4995
4996 cpu_load_efer(env,
4997 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4998 env->eflags = 0;
4999 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
5000 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5001 CC_OP = CC_OP_EFLAGS;
5002
5003 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
5004 env, R_ES);
5005 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5006 env, R_CS);
5007 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5008 env, R_SS);
5009 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5010 env, R_DS);
5011
5012 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
5013 env->eip = EIP;
5014 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
5015 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
5016 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
5017 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
5018 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5019
5020 /* FIXME: guest state consistency checks */
5021
5022 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5023 case TLB_CONTROL_DO_NOTHING:
5024 break;
5025 case TLB_CONTROL_FLUSH_ALL_ASID:
5026 /* FIXME: this is not 100% correct but should work for now */
5027 tlb_flush(env, 1);
5028 break;
5029 }
5030
5031 env->hflags2 |= HF2_GIF_MASK;
5032
5033 if (int_ctl & V_IRQ_MASK) {
5034 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5035 }
5036
5037 /* maybe we need to inject an event */
5038 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5039 if (event_inj & SVM_EVTINJ_VALID) {
5040 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5041 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5042 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
5043
5044 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
5045 /* FIXME: need to implement valid_err */
5046 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5047 case SVM_EVTINJ_TYPE_INTR:
5048 env->exception_index = vector;
5049 env->error_code = event_inj_err;
5050 env->exception_is_int = 0;
5051 env->exception_next_eip = -1;
5052 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
5053 /* XXX: is it always correct ? */
5054 do_interrupt(vector, 0, 0, 0, 1);
5055 break;
5056 case SVM_EVTINJ_TYPE_NMI:
5057 env->exception_index = EXCP02_NMI;
5058 env->error_code = event_inj_err;
5059 env->exception_is_int = 0;
5060 env->exception_next_eip = EIP;
5061 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
5062 cpu_loop_exit();
5063 break;
5064 case SVM_EVTINJ_TYPE_EXEPT:
5065 env->exception_index = vector;
5066 env->error_code = event_inj_err;
5067 env->exception_is_int = 0;
5068 env->exception_next_eip = -1;
5069 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
5070 cpu_loop_exit();
5071 break;
5072 case SVM_EVTINJ_TYPE_SOFT:
5073 env->exception_index = vector;
5074 env->error_code = event_inj_err;
5075 env->exception_is_int = 1;
5076 env->exception_next_eip = EIP;
5077 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
5078 cpu_loop_exit();
5079 break;
5080 }
5081 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
5082 }
5083 }
5084
5085 void helper_vmmcall(void)
5086 {
5087 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5088 raise_exception(EXCP06_ILLOP);
5089 }
5090
5091 void helper_vmload(int aflag)
5092 {
5093 target_ulong addr;
5094 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5095
5096 if (aflag == 2)
5097 addr = EAX;
5098 else
5099 addr = (uint32_t)EAX;
5100
5101 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5102 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5103 env->segs[R_FS].base);
5104
5105 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5106 env, R_FS);
5107 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5108 env, R_GS);
5109 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5110 &env->tr);
5111 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5112 &env->ldt);
5113
5114 #ifdef TARGET_X86_64
5115 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5116 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5117 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5118 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5119 #endif
5120 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5121 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5122 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5123 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5124 }
5125
5126 void helper_vmsave(int aflag)
5127 {
5128 target_ulong addr;
5129 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5130
5131 if (aflag == 2)
5132 addr = EAX;
5133 else
5134 addr = (uint32_t)EAX;
5135
5136 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5137 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5138 env->segs[R_FS].base);
5139
5140 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5141 &env->segs[R_FS]);
5142 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5143 &env->segs[R_GS]);
5144 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5145 &env->tr);
5146 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5147 &env->ldt);
5148
5149 #ifdef TARGET_X86_64
5150 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5151 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5152 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5153 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5154 #endif
5155 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5156 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5157 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5158 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5159 }
5160
5161 void helper_stgi(void)
5162 {
5163 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5164 env->hflags2 |= HF2_GIF_MASK;
5165 }
5166
5167 void helper_clgi(void)
5168 {
5169 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5170 env->hflags2 &= ~HF2_GIF_MASK;
5171 }
5172
5173 void helper_skinit(void)
5174 {
5175 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5176 /* XXX: not implemented */
5177 raise_exception(EXCP06_ILLOP);
5178 }
5179
5180 void helper_invlpga(int aflag)
5181 {
5182 target_ulong addr;
5183 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5184
5185 if (aflag == 2)
5186 addr = EAX;
5187 else
5188 addr = (uint32_t)EAX;
5189
5190 /* XXX: could use the ASID to decide whether the flush is actually
5191 needed */
5192 tlb_flush_page(env, addr);
5193 }
5194
5195 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5196 {
5197 if (likely(!(env->hflags & HF_SVMI_MASK)))
5198 return;
5199 switch(type) {
5200 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5201 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5202 helper_vmexit(type, param);
5203 }
5204 break;
5205 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5206 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5207 helper_vmexit(type, param);
5208 }
5209 break;
5210 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5211 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5212 helper_vmexit(type, param);
5213 }
5214 break;
5215 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5216 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5217 helper_vmexit(type, param);
5218 }
5219 break;
5220 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5221 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5222 helper_vmexit(type, param);
5223 }
5224 break;
5225 case SVM_EXIT_MSR:
5226 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5227 /* FIXME: this should be read in at vmrun (faster this way?) */
5228 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5229 uint32_t t0, t1;
5230 switch((uint32_t)ECX) {
5231 case 0 ... 0x1fff:
5232 t0 = (ECX * 2) % 8;
5233 t1 = (ECX * 2) / 8;
5234 break;
5235 case 0xc0000000 ... 0xc0001fff:
5236 t0 = (8192 + ECX - 0xc0000000) * 2;
5237 t1 = (t0 / 8);
5238 t0 %= 8;
5239 break;
5240 case 0xc0010000 ... 0xc0011fff:
5241 t0 = (16384 + ECX - 0xc0010000) * 2;
5242 t1 = (t0 / 8);
5243 t0 %= 8;
5244 break;
5245 default:
5246 helper_vmexit(type, param);
5247 t0 = 0;
5248 t1 = 0;
5249 break;
5250 }
5251 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5252 helper_vmexit(type, param);
5253 }
5254 break;
5255 default:
5256 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5257 helper_vmexit(type, param);
5258 }
5259 break;
5260 }
5261 }
5262
5263 void helper_svm_check_io(uint32_t port, uint32_t param,
5264 uint32_t next_eip_addend)
5265 {
5266 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5267 /* FIXME: this should be read in at vmrun (faster this way?) */
5268 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5269 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5270 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5271 /* next EIP */
5272 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5273 env->eip + next_eip_addend);
5274 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5275 }
5276 }
5277 }
5278
5279 /* Note: currently only 32 bits of exit_code are used */
5280 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5281 {
5282 uint32_t int_ctl;
5283
5284 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5285 exit_code, exit_info_1,
5286 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5287 EIP);
5288
5289 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5290 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5291 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5292 } else {
5293 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5294 }
5295
5296 /* Save the VM state in the vmcb */
5297 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5298 &env->segs[R_ES]);
5299 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5300 &env->segs[R_CS]);
5301 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5302 &env->segs[R_SS]);
5303 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5304 &env->segs[R_DS]);
5305
5306 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5307 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5308
5309 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5310 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5311
5312 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5313 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5314 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5315 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5316 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5317
5318 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5319 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5320 int_ctl |= env->v_tpr & V_TPR_MASK;
5321 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5322 int_ctl |= V_IRQ_MASK;
5323 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5324
5325 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5326 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5327 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5328 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5329 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5330 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5331 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5332
5333 /* Reload the host state from vm_hsave */
5334 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5335 env->hflags &= ~HF_SVMI_MASK;
5336 env->intercept = 0;
5337 env->intercept_exceptions = 0;
5338 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5339 env->tsc_offset = 0;
5340
5341 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5342 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5343
5344 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5345 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5346
5347 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5348 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5349 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5350 /* we need to set the efer after the crs so the hidden flags get
5351 set properly */
5352 cpu_load_efer(env,
5353 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5354 env->eflags = 0;
5355 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5356 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5357 CC_OP = CC_OP_EFLAGS;
5358
5359 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5360 env, R_ES);
5361 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5362 env, R_CS);
5363 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5364 env, R_SS);
5365 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5366 env, R_DS);
5367
5368 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5369 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5370 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5371
5372 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5373 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5374
5375 /* other setups */
5376 cpu_x86_set_cpl(env, 0);
5377 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5378 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5379
5380 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
5381 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
5382 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
5383 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
5384 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
5385
5386 env->hflags2 &= ~HF2_GIF_MASK;
5387 /* FIXME: Resets the current ASID register to zero (host ASID). */
5388
5389 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5390
5391 /* Clears the TSC_OFFSET inside the processor. */
5392
5393 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5394 from the page table indicated by the host's CR3. If the PDPEs contain
5395 illegal state, the processor causes a shutdown. */
5396
5397 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5398 env->cr[0] |= CR0_PE_MASK;
5399 env->eflags &= ~VM_MASK;
5400
5401 /* Disables all breakpoints in the host DR7 register. */
5402
5403 /* Checks the reloaded host state for consistency. */
5404
5405 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5406 host's code segment or non-canonical (in the case of long mode), a
5407 #GP fault is delivered inside the host. */
5408
5409 /* remove any pending exception */
5410 env->exception_index = -1;
5411 env->error_code = 0;
5412 env->old_exception = -1;
5413
5414 cpu_loop_exit();
5415 }
5416
5417 #endif
5418
5419 /* MMX/SSE */
5420 /* XXX: optimize by storing fptt and fptags in the static cpu state */
5421 void helper_enter_mmx(void)
5422 {
5423 env->fpstt = 0;
5424 *(uint32_t *)(env->fptags) = 0;
5425 *(uint32_t *)(env->fptags + 4) = 0;
5426 }
5427
5428 void helper_emms(void)
5429 {
5430 /* set to empty state */
5431 *(uint32_t *)(env->fptags) = 0x01010101;
5432 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5433 }
5434
5435 /* XXX: suppress */
5436 void helper_movq(void *d, void *s)
5437 {
5438 *(uint64_t *)d = *(uint64_t *)s;
5439 }
5440
5441 #define SHIFT 0
5442 #include "ops_sse.h"
5443
5444 #define SHIFT 1
5445 #include "ops_sse.h"
5446
5447 #define SHIFT 0
5448 #include "helper_template.h"
5449 #undef SHIFT
5450
5451 #define SHIFT 1
5452 #include "helper_template.h"
5453 #undef SHIFT
5454
5455 #define SHIFT 2
5456 #include "helper_template.h"
5457 #undef SHIFT
5458
5459 #ifdef TARGET_X86_64
5460
5461 #define SHIFT 3
5462 #include "helper_template.h"
5463 #undef SHIFT
5464
5465 #endif
5466
5467 /* bit operations */
5468 target_ulong helper_bsf(target_ulong t0)
5469 {
5470 int count;
5471 target_ulong res;
5472
5473 res = t0;
5474 count = 0;
5475 while ((res & 1) == 0) {
5476 count++;
5477 res >>= 1;
5478 }
5479 return count;
5480 }
5481
5482 target_ulong helper_lzcnt(target_ulong t0, int wordsize)
5483 {
5484 int count;
5485 target_ulong res, mask;
5486
5487 if (wordsize > 0 && t0 == 0) {
5488 return wordsize;
5489 }
5490 res = t0;
5491 count = TARGET_LONG_BITS - 1;
5492 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5493 while ((res & mask) == 0) {
5494 count--;
5495 res <<= 1;
5496 }
5497 if (wordsize > 0) {
5498 return wordsize - 1 - count;
5499 }
5500 return count;
5501 }
5502
5503 target_ulong helper_bsr(target_ulong t0)
5504 {
5505 return helper_lzcnt(t0, 0);
5506 }
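
/* Illustrative examples: helper_bsf(0x18) returns 3 (index of the lowest
 * set bit), helper_bsr(0x18) returns 4 (index of the highest set bit) and
 * helper_lzcnt(0x0010, 16) returns 11, the number of leading zero bits in
 * a 16-bit word.  Callers are expected to pass a non-zero argument for
 * BSF/BSR, since the loops above do not terminate for 0. */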
5507
5508 static int compute_all_eflags(void)
5509 {
5510 return CC_SRC;
5511 }
5512
5513 static int compute_c_eflags(void)
5514 {
5515 return CC_SRC & CC_C;
5516 }
5517
5518 uint32_t helper_cc_compute_all(int op)
5519 {
5520 switch (op) {
5521 default: /* should never happen */ return 0;
5522
5523 case CC_OP_EFLAGS: return compute_all_eflags();
5524
5525 case CC_OP_MULB: return compute_all_mulb();
5526 case CC_OP_MULW: return compute_all_mulw();
5527 case CC_OP_MULL: return compute_all_mull();
5528
5529 case CC_OP_ADDB: return compute_all_addb();
5530 case CC_OP_ADDW: return compute_all_addw();
5531 case CC_OP_ADDL: return compute_all_addl();
5532
5533 case CC_OP_ADCB: return compute_all_adcb();
5534 case CC_OP_ADCW: return compute_all_adcw();
5535 case CC_OP_ADCL: return compute_all_adcl();
5536
5537 case CC_OP_SUBB: return compute_all_subb();
5538 case CC_OP_SUBW: return compute_all_subw();
5539 case CC_OP_SUBL: return compute_all_subl();
5540
5541 case CC_OP_SBBB: return compute_all_sbbb();
5542 case CC_OP_SBBW: return compute_all_sbbw();
5543 case CC_OP_SBBL: return compute_all_sbbl();
5544
5545 case CC_OP_LOGICB: return compute_all_logicb();
5546 case CC_OP_LOGICW: return compute_all_logicw();
5547 case CC_OP_LOGICL: return compute_all_logicl();
5548
5549 case CC_OP_INCB: return compute_all_incb();
5550 case CC_OP_INCW: return compute_all_incw();
5551 case CC_OP_INCL: return compute_all_incl();
5552
5553 case CC_OP_DECB: return compute_all_decb();
5554 case CC_OP_DECW: return compute_all_decw();
5555 case CC_OP_DECL: return compute_all_decl();
5556
5557 case CC_OP_SHLB: return compute_all_shlb();
5558 case CC_OP_SHLW: return compute_all_shlw();
5559 case CC_OP_SHLL: return compute_all_shll();
5560
5561 case CC_OP_SARB: return compute_all_sarb();
5562 case CC_OP_SARW: return compute_all_sarw();
5563 case CC_OP_SARL: return compute_all_sarl();
5564
5565 #ifdef TARGET_X86_64
5566 case CC_OP_MULQ: return compute_all_mulq();
5567
5568 case CC_OP_ADDQ: return compute_all_addq();
5569
5570 case CC_OP_ADCQ: return compute_all_adcq();
5571
5572 case CC_OP_SUBQ: return compute_all_subq();
5573
5574 case CC_OP_SBBQ: return compute_all_sbbq();
5575
5576 case CC_OP_LOGICQ: return compute_all_logicq();
5577
5578 case CC_OP_INCQ: return compute_all_incq();
5579
5580 case CC_OP_DECQ: return compute_all_decq();
5581
5582 case CC_OP_SHLQ: return compute_all_shlq();
5583
5584 case CC_OP_SARQ: return compute_all_sarq();
5585 #endif
5586 }
5587 }
5588
5589 uint32_t helper_cc_compute_c(int op)
5590 {
5591 switch (op) {
5592 default: /* should never happen */ return 0;
5593
5594 case CC_OP_EFLAGS: return compute_c_eflags();
5595
5596 case CC_OP_MULB: return compute_c_mull();
5597 case CC_OP_MULW: return compute_c_mull();
5598 case CC_OP_MULL: return compute_c_mull();
5599
5600 case CC_OP_ADDB: return compute_c_addb();
5601 case CC_OP_ADDW: return compute_c_addw();
5602 case CC_OP_ADDL: return compute_c_addl();
5603
5604 case CC_OP_ADCB: return compute_c_adcb();
5605 case CC_OP_ADCW: return compute_c_adcw();
5606 case CC_OP_ADCL: return compute_c_adcl();
5607
5608 case CC_OP_SUBB: return compute_c_subb();
5609 case CC_OP_SUBW: return compute_c_subw();
5610 case CC_OP_SUBL: return compute_c_subl();
5611
5612 case CC_OP_SBBB: return compute_c_sbbb();
5613 case CC_OP_SBBW: return compute_c_sbbw();
5614 case CC_OP_SBBL: return compute_c_sbbl();
5615
5616 case CC_OP_LOGICB: return compute_c_logicb();
5617 case CC_OP_LOGICW: return compute_c_logicw();
5618 case CC_OP_LOGICL: return compute_c_logicl();
5619
5620 case CC_OP_INCB: return compute_c_incl();
5621 case CC_OP_INCW: return compute_c_incl();
5622 case CC_OP_INCL: return compute_c_incl();
5623
5624 case CC_OP_DECB: return compute_c_incl();
5625 case CC_OP_DECW: return compute_c_incl();
5626 case CC_OP_DECL: return compute_c_incl();
5627
5628 case CC_OP_SHLB: return compute_c_shlb();
5629 case CC_OP_SHLW: return compute_c_shlw();
5630 case CC_OP_SHLL: return compute_c_shll();
5631
5632 case CC_OP_SARB: return compute_c_sarl();
5633 case CC_OP_SARW: return compute_c_sarl();
5634 case CC_OP_SARL: return compute_c_sarl();
5635
5636 #ifdef TARGET_X86_64
5637 case CC_OP_MULQ: return compute_c_mull();
5638
5639 case CC_OP_ADDQ: return compute_c_addq();
5640
5641 case CC_OP_ADCQ: return compute_c_adcq();
5642
5643 case CC_OP_SUBQ: return compute_c_subq();
5644
5645 case CC_OP_SBBQ: return compute_c_sbbq();
5646
5647 case CC_OP_LOGICQ: return compute_c_logicq();
5648
5649 case CC_OP_INCQ: return compute_c_incl();
5650
5651 case CC_OP_DECQ: return compute_c_incl();
5652
5653 case CC_OP_SHLQ: return compute_c_shlq();
5654
5655 case CC_OP_SARQ: return compute_c_sarl();
5656 #endif
5657 }
5658 }